Diffstat (limited to 'module/lib')
49 files changed, 9504 insertions, 9802 deletions
diff --git a/module/lib/BeautifulSoup.py b/module/lib/BeautifulSoup.py
deleted file mode 100644
index 55567f588..000000000
--- a/module/lib/BeautifulSoup.py
+++ /dev/null
@@ -1,2012 +0,0 @@
-"""Beautiful Soup
-Elixir and Tonic
-"The Screen-Scraper's Friend"
-http://www.crummy.com/software/BeautifulSoup/
-
-Beautiful Soup parses a (possibly invalid) XML or HTML document into a
-tree representation. It provides methods and Pythonic idioms that make
-it easy to navigate, search, and modify the tree.
-
-A well-formed XML/HTML document yields a well-formed data
-structure. An ill-formed XML/HTML document yields a correspondingly
-ill-formed data structure. If your document is only locally
-well-formed, you can use this library to find and process the
-well-formed part of it.
-
-Beautiful Soup works with Python 2.2 and up. It has no external
-dependencies, but you'll have more success at converting data to UTF-8
-if you also install these three packages:
-
-* chardet, for auto-detecting character encodings
-  http://chardet.feedparser.org/
-* cjkcodecs and iconv_codec, which add more encodings to the ones supported
-  by stock Python.
-  http://cjkpython.i18n.org/
-
-Beautiful Soup defines classes for two main parsing strategies:
-
- * BeautifulStoneSoup, for parsing XML, SGML, or your domain-specific
-   language that kind of looks like XML.
-
- * BeautifulSoup, for parsing run-of-the-mill HTML code, be it valid
-   or invalid. This class has web browser-like heuristics for
-   obtaining a sensible parse tree in the face of common HTML errors.
-
-Beautiful Soup also defines a class (UnicodeDammit) for autodetecting
-the encoding of an HTML or XML document, and converting it to
-Unicode. Much of this code is taken from Mark Pilgrim's Universal Feed Parser.
-
-For more than you ever wanted to know about Beautiful Soup, see the
-documentation:
-http://www.crummy.com/software/BeautifulSoup/documentation.html
-
-Here, have some legalese:
-
-Copyright (c) 2004-2010, Leonard Richardson
-
-All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are
-met:
-
- * Redistributions of source code must retain the above copyright
-   notice, this list of conditions and the following disclaimer.
-
- * Redistributions in binary form must reproduce the above
-   copyright notice, this list of conditions and the following
-   disclaimer in the documentation and/or other materials provided
-   with the distribution.
-
- * Neither the name of the Beautiful Soup Consortium and All
-   Night Kosher Bakery nor the names of its contributors may be
-   used to endorse or promote products derived from this software
-   without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
-CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE, DAMMIT.
- -""" -from __future__ import generators - -__author__ = "Leonard Richardson (leonardr@segfault.org)" -__version__ = "3.0.8.1" -__copyright__ = "Copyright (c) 2004-2010 Leonard Richardson" -__license__ = "New-style BSD" - -from sgmllib import SGMLParser, SGMLParseError -import codecs -import markupbase -import types -import re -import sgmllib -try: - from htmlentitydefs import name2codepoint -except ImportError: - name2codepoint = {} -try: - set -except NameError: - from sets import Set as set - -#These hacks make Beautiful Soup able to parse XML with namespaces -sgmllib.tagfind = re.compile('[a-zA-Z][-_.:a-zA-Z0-9]*') -markupbase._declname_match = re.compile(r'[a-zA-Z][-_.:a-zA-Z0-9]*\s*').match - -DEFAULT_OUTPUT_ENCODING = "utf-8" - -def _match_css_class(str): - """Build a RE to match the given CSS class.""" - return re.compile(r"(^|.*\s)%s($|\s)" % str) - -# First, the classes that represent markup elements. - -class PageElement(object): - """Contains the navigational information for some part of the page - (either a tag or a piece of text)""" - - def setup(self, parent=None, previous=None): - """Sets up the initial relations between this element and - other elements.""" - self.parent = parent - self.previous = previous - self.next = None - self.previousSibling = None - self.nextSibling = None - if self.parent and self.parent.contents: - self.previousSibling = self.parent.contents[-1] - self.previousSibling.nextSibling = self - - def replaceWith(self, replaceWith): - oldParent = self.parent - myIndex = self.parent.index(self) - if hasattr(replaceWith, "parent")\ - and replaceWith.parent is self.parent: - # We're replacing this element with one of its siblings. - index = replaceWith.parent.index(replaceWith) - if index and index < myIndex: - # Furthermore, it comes before this element. That - # means that when we extract it, the index of this - # element will change. - myIndex = myIndex - 1 - self.extract() - oldParent.insert(myIndex, replaceWith) - - def replaceWithChildren(self): - myParent = self.parent - myIndex = self.parent.index(self) - self.extract() - reversedChildren = list(self.contents) - reversedChildren.reverse() - for child in reversedChildren: - myParent.insert(myIndex, child) - - def extract(self): - """Destructively rips this element out of the tree.""" - if self.parent: - try: - del self.parent.contents[self.parent.index(self)] - except ValueError: - pass - - #Find the two elements that would be next to each other if - #this element (and any children) hadn't been parsed. Connect - #the two. - lastChild = self._lastRecursiveChild() - nextElement = lastChild.next - - if self.previous: - self.previous.next = nextElement - if nextElement: - nextElement.previous = self.previous - self.previous = None - lastChild.next = None - - self.parent = None - if self.previousSibling: - self.previousSibling.nextSibling = self.nextSibling - if self.nextSibling: - self.nextSibling.previousSibling = self.previousSibling - self.previousSibling = self.nextSibling = None - return self - - def _lastRecursiveChild(self): - "Finds the last element beneath this object to be parsed." 
-        lastChild = self
-        while hasattr(lastChild, 'contents') and lastChild.contents:
-            lastChild = lastChild.contents[-1]
-        return lastChild
-
-    def insert(self, position, newChild):
-        if isinstance(newChild, basestring) \
-            and not isinstance(newChild, NavigableString):
-            newChild = NavigableString(newChild)
-
-        position = min(position, len(self.contents))
-        if hasattr(newChild, 'parent') and newChild.parent is not None:
-            # We're 'inserting' an element that's already one
-            # of this object's children.
-            if newChild.parent is self:
-                index = self.index(newChild)
-                if index > position:
-                    # Furthermore we're moving it further down the
-                    # list of this object's children. That means that
-                    # when we extract this element, our target index
-                    # will jump down one.
-                    position = position - 1
-            newChild.extract()
-
-        newChild.parent = self
-        previousChild = None
-        if position == 0:
-            newChild.previousSibling = None
-            newChild.previous = self
-        else:
-            previousChild = self.contents[position-1]
-            newChild.previousSibling = previousChild
-            newChild.previousSibling.nextSibling = newChild
-            newChild.previous = previousChild._lastRecursiveChild()
-        if newChild.previous:
-            newChild.previous.next = newChild
-
-        newChildsLastElement = newChild._lastRecursiveChild()
-
-        if position >= len(self.contents):
-            newChild.nextSibling = None
-
-            parent = self
-            parentsNextSibling = None
-            while not parentsNextSibling:
-                parentsNextSibling = parent.nextSibling
-                parent = parent.parent
-                if not parent: # This is the last element in the document.
-                    break
-            if parentsNextSibling:
-                newChildsLastElement.next = parentsNextSibling
-            else:
-                newChildsLastElement.next = None
-        else:
-            nextChild = self.contents[position]
-            newChild.nextSibling = nextChild
-            if newChild.nextSibling:
-                newChild.nextSibling.previousSibling = newChild
-            newChildsLastElement.next = nextChild
-
-        if newChildsLastElement.next:
-            newChildsLastElement.next.previous = newChildsLastElement
-        self.contents.insert(position, newChild)
-
-    def append(self, tag):
-        """Appends the given tag to the contents of this tag."""
-        self.insert(len(self.contents), tag)
-
-    def findNext(self, name=None, attrs={}, text=None, **kwargs):
-        """Returns the first item that matches the given criteria and
-        appears after this Tag in the document."""
-        return self._findOne(self.findAllNext, name, attrs, text, **kwargs)
-
-    def findAllNext(self, name=None, attrs={}, text=None, limit=None,
-                    **kwargs):
-        """Returns all items that match the given criteria and appear
-        after this Tag in the document."""
-        return self._findAll(name, attrs, text, limit, self.nextGenerator,
-                             **kwargs)
-
-    def findNextSibling(self, name=None, attrs={}, text=None, **kwargs):
-        """Returns the closest sibling to this Tag that matches the
-        given criteria and appears after this Tag in the document."""
-        return self._findOne(self.findNextSiblings, name, attrs, text,
-                             **kwargs)
-
-    def findNextSiblings(self, name=None, attrs={}, text=None, limit=None,
-                         **kwargs):
-        """Returns the siblings of this Tag that match the given
-        criteria and appear after this Tag in the document."""
-        return self._findAll(name, attrs, text, limit,
-                             self.nextSiblingGenerator, **kwargs)
-    fetchNextSiblings = findNextSiblings # Compatibility with pre-3.x
-
-    def findPrevious(self, name=None, attrs={}, text=None, **kwargs):
-        """Returns the first item that matches the given criteria and
-        appears before this Tag in the document."""
-        return self._findOne(self.findAllPrevious, name, attrs, text, **kwargs)
-
-    def findAllPrevious(self, name=None, attrs={}, text=None, limit=None,
-                        **kwargs):
-        """Returns all items that match the given criteria and appear
-        before this Tag in the document."""
-        return self._findAll(name, attrs, text, limit, self.previousGenerator,
-                             **kwargs)
-    fetchPrevious = findAllPrevious # Compatibility with pre-3.x
-
-    def findPreviousSibling(self, name=None, attrs={}, text=None, **kwargs):
-        """Returns the closest sibling to this Tag that matches the
-        given criteria and appears before this Tag in the document."""
-        return self._findOne(self.findPreviousSiblings, name, attrs, text,
-                             **kwargs)
-
-    def findPreviousSiblings(self, name=None, attrs={}, text=None,
-                             limit=None, **kwargs):
-        """Returns the siblings of this Tag that match the given
-        criteria and appear before this Tag in the document."""
-        return self._findAll(name, attrs, text, limit,
-                             self.previousSiblingGenerator, **kwargs)
-    fetchPreviousSiblings = findPreviousSiblings # Compatibility with pre-3.x
-
-    def findParent(self, name=None, attrs={}, **kwargs):
-        """Returns the closest parent of this Tag that matches the given
-        criteria."""
-        # NOTE: We can't use _findOne because findParents takes a different
-        # set of arguments.
-        r = None
-        l = self.findParents(name, attrs, 1)
-        if l:
-            r = l[0]
-        return r
-
-    def findParents(self, name=None, attrs={}, limit=None, **kwargs):
-        """Returns the parents of this Tag that match the given
-        criteria."""
-
-        return self._findAll(name, attrs, None, limit, self.parentGenerator,
-                             **kwargs)
-    fetchParents = findParents # Compatibility with pre-3.x
-
-    #These methods do the real heavy lifting.
-
-    def _findOne(self, method, name, attrs, text, **kwargs):
-        r = None
-        l = method(name, attrs, text, 1, **kwargs)
-        if l:
-            r = l[0]
-        return r
-
-    def _findAll(self, name, attrs, text, limit, generator, **kwargs):
-        "Iterates over a generator looking for things that match."
-
-        if isinstance(name, SoupStrainer):
-            strainer = name
-        # (Possibly) special case some findAll*(...) searches
-        elif text is None and not limit and not attrs and not kwargs:
-            # findAll*(True)
-            if name is True:
-                return [element for element in generator()
-                        if isinstance(element, Tag)]
-            # findAll*('tag-name')
-            elif isinstance(name, basestring):
-                return [element for element in generator()
-                        if isinstance(element, Tag) and
-                        element.name == name]
-            else:
-                strainer = SoupStrainer(name, attrs, text, **kwargs)
-        # Build a SoupStrainer
-        else:
-            strainer = SoupStrainer(name, attrs, text, **kwargs)
-        results = ResultSet(strainer)
-        g = generator()
-        while True:
-            try:
-                i = g.next()
-            except StopIteration:
-                break
-            if i:
-                found = strainer.search(i)
-                if found:
-                    results.append(found)
-                    if limit and len(results) >= limit:
-                        break
-        return results
-
-    #These Generators can be used to navigate starting from both
-    #NavigableStrings and Tags.
-    def nextGenerator(self):
-        i = self
-        while i is not None:
-            i = i.next
-            yield i
-
-    def nextSiblingGenerator(self):
-        i = self
-        while i is not None:
-            i = i.nextSibling
-            yield i
-
-    def previousGenerator(self):
-        i = self
-        while i is not None:
-            i = i.previous
-            yield i
-
-    def previousSiblingGenerator(self):
-        i = self
-        while i is not None:
-            i = i.previousSibling
-            yield i
-
-    def parentGenerator(self):
-        i = self
-        while i is not None:
-            i = i.parent
-            yield i
-
-    # Utility methods
-    def substituteEncoding(self, str, encoding=None):
-        encoding = encoding or "utf-8"
-        return str.replace("%SOUP-ENCODING%", encoding)
-
-    def toEncoding(self, s, encoding=None):
-        """Encodes an object to a string in some encoding, or to Unicode."""
-        if isinstance(s, unicode):
-            if encoding:
-                s = s.encode(encoding)
-        elif isinstance(s, str):
-            if encoding:
-                s = s.encode(encoding)
-            else:
-                s = unicode(s)
-        else:
-            if encoding:
-                s = self.toEncoding(str(s), encoding)
-            else:
-                s = unicode(s)
-        return s
-
-class NavigableString(unicode, PageElement):
-
-    def __new__(cls, value):
-        """Create a new NavigableString.
-
-        When unpickling a NavigableString, this method is called with
-        the string in DEFAULT_OUTPUT_ENCODING. That encoding needs to be
-        passed in to the superclass's __new__ or the superclass won't know
-        how to handle non-ASCII characters.
-        """
-        if isinstance(value, unicode):
-            return unicode.__new__(cls, value)
-        return unicode.__new__(cls, value, DEFAULT_OUTPUT_ENCODING)
-
-    def __getnewargs__(self):
-        return (NavigableString.__str__(self),)
-
-    def __getattr__(self, attr):
-        """text.string gives you text. This is for backwards
-        compatibility for Navigable*String, but for CData* it lets you
-        get the string without the CData wrapper."""
-        if attr == 'string':
-            return self
-        else:
-            raise AttributeError, "'%s' object has no attribute '%s'" % (self.__class__.__name__, attr)
-
-    def __unicode__(self):
-        return str(self).decode(DEFAULT_OUTPUT_ENCODING)
-
-    def __str__(self, encoding=DEFAULT_OUTPUT_ENCODING):
-        if encoding:
-            return self.encode(encoding)
-        else:
-            return self
-
-class CData(NavigableString):
-
-    def __str__(self, encoding=DEFAULT_OUTPUT_ENCODING):
-        return "<![CDATA[%s]]>" % NavigableString.__str__(self, encoding)
-
-class ProcessingInstruction(NavigableString):
-    def __str__(self, encoding=DEFAULT_OUTPUT_ENCODING):
-        output = self
-        if "%SOUP-ENCODING%" in output:
-            output = self.substituteEncoding(output, encoding)
-        return "<?%s?>" % self.toEncoding(output, encoding)
-
-class Comment(NavigableString):
-    def __str__(self, encoding=DEFAULT_OUTPUT_ENCODING):
-        return "<!--%s-->" % NavigableString.__str__(self, encoding)
-
-class Declaration(NavigableString):
-    def __str__(self, encoding=DEFAULT_OUTPUT_ENCODING):
-        return "<!%s>" % NavigableString.__str__(self, encoding)
-
-class Tag(PageElement):
-
-    """Represents a found HTML tag with its attributes and contents."""
-
-    def _invert(h):
-        "Cheap function to invert a hash."
-        i = {}
-        for k,v in h.items():
-            i[v] = k
-        return i
-
-    XML_ENTITIES_TO_SPECIAL_CHARS = { "apos" : "'",
-                                      "quot" : '"',
-                                      "amp" : "&",
-                                      "lt" : "<",
-                                      "gt" : ">" }
-
-    XML_SPECIAL_CHARS_TO_ENTITIES = _invert(XML_ENTITIES_TO_SPECIAL_CHARS)
-
-    def _convertEntities(self, match):
-        """Used in a call to re.sub to replace HTML, XML, and numeric
-        entities with the appropriate Unicode characters. If HTML
-        entities are being converted, any unrecognized entities are
-        escaped."""
-        x = match.group(1)
-        if self.convertHTMLEntities and x in name2codepoint:
-            return unichr(name2codepoint[x])
-        elif x in self.XML_ENTITIES_TO_SPECIAL_CHARS:
-            if self.convertXMLEntities:
-                return self.XML_ENTITIES_TO_SPECIAL_CHARS[x]
-            else:
-                return u'&%s;' % x
-        elif len(x) > 0 and x[0] == '#':
-            # Handle numeric entities
-            if len(x) > 1 and x[1] == 'x':
-                return unichr(int(x[2:], 16))
-            else:
-                return unichr(int(x[1:]))
-
-        elif self.escapeUnrecognizedEntities:
-            return u'&amp;%s;' % x
-        else:
-            return u'&%s;' % x
-
-    def __init__(self, parser, name, attrs=None, parent=None,
-                 previous=None):
-        "Basic constructor."
-
-        # We don't actually store the parser object: that lets extracted
-        # chunks be garbage-collected
-        self.parserClass = parser.__class__
-        self.isSelfClosing = parser.isSelfClosingTag(name)
-        self.name = name
-        if attrs is None:
-            attrs = []
-        self.attrs = attrs
-        self.contents = []
-        self.setup(parent, previous)
-        self.hidden = False
-        self.containsSubstitutions = False
-        self.convertHTMLEntities = parser.convertHTMLEntities
-        self.convertXMLEntities = parser.convertXMLEntities
-        self.escapeUnrecognizedEntities = parser.escapeUnrecognizedEntities
-
-        # Convert any HTML, XML, or numeric entities in the attribute values.
-        convert = lambda(k, val): (k,
-                                   re.sub("&(#\d+|#x[0-9a-fA-F]+|\w+);",
-                                          self._convertEntities,
-                                          val))
-        self.attrs = map(convert, self.attrs)
-
-    def getString(self):
-        if (len(self.contents) == 1
-            and isinstance(self.contents[0], NavigableString)):
-            return self.contents[0]
-
-    def setString(self, string):
-        """Replace the contents of the tag with a string"""
-        self.clear()
-        self.append(string)
-
-    string = property(getString, setString)
-
-    def getText(self, separator=u""):
-        if not len(self.contents):
-            return u""
-        stopNode = self._lastRecursiveChild().next
-        strings = []
-        current = self.contents[0]
-        while current is not stopNode:
-            if isinstance(current, NavigableString):
-                strings.append(current.strip())
-            current = current.next
-        return separator.join(strings)
-
-    text = property(getText)
-
-    def get(self, key, default=None):
-        """Returns the value of the 'key' attribute for the tag, or
-        the value given for 'default' if it doesn't have that
-        attribute."""
-        return self._getAttrMap().get(key, default)
-
-    def clear(self):
-        """Extract all children."""
-        for child in self.contents[:]:
-            child.extract()
-
-    def index(self, element):
-        for i, child in enumerate(self.contents):
-            if child is element:
-                return i
-        raise ValueError("Tag.index: element not in tag")
-
-    def has_key(self, key):
-        return self._getAttrMap().has_key(key)
-
-    def __getitem__(self, key):
-        """tag[key] returns the value of the 'key' attribute for the tag,
-        and throws an exception if it's not there."""
-        return self._getAttrMap()[key]
-
-    def __iter__(self):
-        "Iterating over a tag iterates over its contents."
-        return iter(self.contents)
-
-    def __len__(self):
-        "The length of a tag is the length of its list of contents."
-        return len(self.contents)
-
-    def __contains__(self, x):
-        return x in self.contents
-
-    def __nonzero__(self):
-        "A tag is non-None even if it has no contents."
-        return True
-
-    def __setitem__(self, key, value):
-        """Setting tag[key] sets the value of the 'key' attribute for the
-        tag."""
-        self._getAttrMap()
-        self.attrMap[key] = value
-        found = False
-        for i in range(0, len(self.attrs)):
-            if self.attrs[i][0] == key:
-                self.attrs[i] = (key, value)
-                found = True
-        if not found:
-            self.attrs.append((key, value))
-            self._getAttrMap()[key] = value
-
-    def __delitem__(self, key):
-        "Deleting tag[key] deletes all 'key' attributes for the tag."
-        for item in self.attrs:
-            if item[0] == key:
-                self.attrs.remove(item)
-                #We don't break because bad HTML can define the same
-                #attribute multiple times.
-            self._getAttrMap()
-            if self.attrMap.has_key(key):
-                del self.attrMap[key]
-
-    def __call__(self, *args, **kwargs):
-        """Calling a tag like a function is the same as calling its
-        findAll() method. Eg. tag('a') returns a list of all the A tags
-        found within this tag."""
-        return apply(self.findAll, args, kwargs)
-
-    def __getattr__(self, tag):
-        #print "Getattr %s.%s" % (self.__class__, tag)
-        if len(tag) > 3 and tag.rfind('Tag') == len(tag)-3:
-            return self.find(tag[:-3])
-        elif tag.find('__') != 0:
-            return self.find(tag)
-        raise AttributeError, "'%s' object has no attribute '%s'" % (self.__class__, tag)
-
-    def __eq__(self, other):
-        """Returns true iff this tag has the same name, the same attributes,
-        and the same contents (recursively) as the given tag.
-
-        NOTE: right now this will return false if two tags have the
-        same attributes in a different order. Should this be fixed?"""
-        if other is self:
-            return True
-        if not hasattr(other, 'name') or not hasattr(other, 'attrs') or not hasattr(other, 'contents') or self.name != other.name or self.attrs != other.attrs or len(self) != len(other):
-            return False
-        for i in range(0, len(self.contents)):
-            if self.contents[i] != other.contents[i]:
-                return False
-        return True
-
-    def __ne__(self, other):
-        """Returns true iff this tag is not identical to the other tag,
-        as defined in __eq__."""
-        return not self == other
-
-    def __repr__(self, encoding=DEFAULT_OUTPUT_ENCODING):
-        """Renders this tag as a string."""
-        return self.__str__(encoding)
-
-    def __unicode__(self):
-        return self.__str__(None)
-
-    BARE_AMPERSAND_OR_BRACKET = re.compile("([<>]|"
-                                           + "&(?!#\d+;|#x[0-9a-fA-F]+;|\w+;)"
-                                           + ")")
-
-    def _sub_entity(self, x):
-        """Used with a regular expression to substitute the
-        appropriate XML entity for an XML special character."""
-        return "&" + self.XML_SPECIAL_CHARS_TO_ENTITIES[x.group(0)[0]] + ";"
-
-    def __str__(self, encoding=DEFAULT_OUTPUT_ENCODING,
-                prettyPrint=False, indentLevel=0):
-        """Returns a string or Unicode representation of this tag and
-        its contents. To get Unicode, pass None for encoding.
-
-        NOTE: since Python's HTML parser consumes whitespace, this
-        method is not certain to reproduce the whitespace present in
-        the original string."""
-
-        encodedName = self.toEncoding(self.name, encoding)
-
-        attrs = []
-        if self.attrs:
-            for key, val in self.attrs:
-                fmt = '%s="%s"'
-                if isinstance(val, basestring):
-                    if self.containsSubstitutions and '%SOUP-ENCODING%' in val:
-                        val = self.substituteEncoding(val, encoding)
-
-                    # The attribute value either:
-                    #
-                    # * Contains no embedded double quotes or single quotes.
-                    #   No problem: we enclose it in double quotes.
-                    # * Contains embedded single quotes. No problem:
-                    #   double quotes work here too.
-                    # * Contains embedded double quotes. No problem:
-                    #   we enclose it in single quotes.
-                    # * Embeds both single _and_ double quotes. This
This - # can't happen naturally, but it can happen if - # you modify an attribute value after parsing - # the document. Now we have a bit of a - # problem. We solve it by enclosing the - # attribute in single quotes, and escaping any - # embedded single quotes to XML entities. - if '"' in val: - fmt = "%s='%s'" - if "'" in val: - # TODO: replace with apos when - # appropriate. - val = val.replace("'", "&squot;") - - # Now we're okay w/r/t quotes. But the attribute - # value might also contain angle brackets, or - # ampersands that aren't part of entities. We need - # to escape those to XML entities too. - val = self.BARE_AMPERSAND_OR_BRACKET.sub(self._sub_entity, val) - - attrs.append(fmt % (self.toEncoding(key, encoding), - self.toEncoding(val, encoding))) - close = '' - closeTag = '' - if self.isSelfClosing: - close = ' /' - else: - closeTag = '</%s>' % encodedName - - indentTag, indentContents = 0, 0 - if prettyPrint: - indentTag = indentLevel - space = (' ' * (indentTag-1)) - indentContents = indentTag + 1 - contents = self.renderContents(encoding, prettyPrint, indentContents) - if self.hidden: - s = contents - else: - s = [] - attributeString = '' - if attrs: - attributeString = ' ' + ' '.join(attrs) - if prettyPrint: - s.append(space) - s.append('<%s%s%s>' % (encodedName, attributeString, close)) - if prettyPrint: - s.append("\n") - s.append(contents) - if prettyPrint and contents and contents[-1] != "\n": - s.append("\n") - if prettyPrint and closeTag: - s.append(space) - s.append(closeTag) - if prettyPrint and closeTag and self.nextSibling: - s.append("\n") - s = ''.join(s) - return s - - def decompose(self): - """Recursively destroys the contents of this tree.""" - self.extract() - if len(self.contents) == 0: - return - current = self.contents[0] - while current is not None: - next = current.next - if isinstance(current, Tag): - del current.contents[:] - current.parent = None - current.previous = None - current.previousSibling = None - current.next = None - current.nextSibling = None - current = next - - def prettify(self, encoding=DEFAULT_OUTPUT_ENCODING): - return self.__str__(encoding, True) - - def renderContents(self, encoding=DEFAULT_OUTPUT_ENCODING, - prettyPrint=False, indentLevel=0): - """Renders the contents of this tag as a string in the given - encoding. If encoding is None, returns a Unicode string..""" - s=[] - for c in self: - text = None - if isinstance(c, NavigableString): - text = c.__str__(encoding) - elif isinstance(c, Tag): - s.append(c.__str__(encoding, prettyPrint, indentLevel)) - if text and prettyPrint: - text = text.strip() - if text: - if prettyPrint: - s.append(" " * (indentLevel-1)) - s.append(text) - if prettyPrint: - s.append("\n") - return ''.join(s) - - #Soup methods - - def find(self, name=None, attrs={}, recursive=True, text=None, - **kwargs): - """Return only the first child of this Tag matching the given - criteria.""" - r = None - l = self.findAll(name, attrs, recursive, text, 1, **kwargs) - if l: - r = l[0] - return r - findChild = find - - def findAll(self, name=None, attrs={}, recursive=True, text=None, - limit=None, **kwargs): - """Extracts a list of Tag objects that match the given - criteria. You can specify the name of the Tag and any - attributes you want the Tag to have. - - The value of a key-value pair in the 'attrs' map can be a - string, a list of strings, a regular expression object, or a - callable that takes a string and returns whether or not the - string matches for some custom definition of 'matches'. 
-        same is true of the tag name."""
-        generator = self.recursiveChildGenerator
-        if not recursive:
-            generator = self.childGenerator
-        return self._findAll(name, attrs, text, limit, generator, **kwargs)
-    findChildren = findAll
-
-    # Pre-3.x compatibility methods
-    first = find
-    fetch = findAll
-
-    def fetchText(self, text=None, recursive=True, limit=None):
-        return self.findAll(text=text, recursive=recursive, limit=limit)
-
-    def firstText(self, text=None, recursive=True):
-        return self.find(text=text, recursive=recursive)
-
-    #Private methods
-
-    def _getAttrMap(self):
-        """Initializes a map representation of this tag's attributes,
-        if not already initialized."""
-        if not getattr(self, 'attrMap'):
-            self.attrMap = {}
-            for (key, value) in self.attrs:
-                self.attrMap[key] = value
-        return self.attrMap
-
-    #Generator methods
-    def childGenerator(self):
-        # Just use the iterator from the contents
-        return iter(self.contents)
-
-    def recursiveChildGenerator(self):
-        if not len(self.contents):
-            raise StopIteration
-        stopNode = self._lastRecursiveChild().next
-        current = self.contents[0]
-        while current is not stopNode:
-            yield current
-            current = current.next
-
-
-# Next, a couple classes to represent queries and their results.
-class SoupStrainer:
-    """Encapsulates a number of ways of matching a markup element (tag or
-    text)."""
-
-    def __init__(self, name=None, attrs={}, text=None, **kwargs):
-        self.name = name
-        if isinstance(attrs, basestring):
-            kwargs['class'] = _match_css_class(attrs)
-            attrs = None
-        if kwargs:
-            if attrs:
-                attrs = attrs.copy()
-                attrs.update(kwargs)
-            else:
-                attrs = kwargs
-        self.attrs = attrs
-        self.text = text
-
-    def __str__(self):
-        if self.text:
-            return self.text
-        else:
-            return "%s|%s" % (self.name, self.attrs)
-
-    def searchTag(self, markupName=None, markupAttrs={}):
-        found = None
-        markup = None
-        if isinstance(markupName, Tag):
-            markup = markupName
-            markupAttrs = markup
-        callFunctionWithTagData = callable(self.name) \
-                                and not isinstance(markupName, Tag)
-
-        if (not self.name) \
-               or callFunctionWithTagData \
-               or (markup and self._matches(markup, self.name)) \
-               or (not markup and self._matches(markupName, self.name)):
-            if callFunctionWithTagData:
-                match = self.name(markupName, markupAttrs)
-            else:
-                match = True
-                markupAttrMap = None
-                for attr, matchAgainst in self.attrs.items():
-                    if not markupAttrMap:
-                        if hasattr(markupAttrs, 'get'):
-                            markupAttrMap = markupAttrs
-                        else:
-                            markupAttrMap = {}
-                            for k,v in markupAttrs:
-                                markupAttrMap[k] = v
-                    attrValue = markupAttrMap.get(attr)
-                    if not self._matches(attrValue, matchAgainst):
-                        match = False
-                        break
-            if match:
-                if markup:
-                    found = markup
-                else:
-                    found = markupName
-        return found
-
-    def search(self, markup):
-        #print 'looking for %s in %s' % (self, markup)
-        found = None
-        # If given a list of items, scan it for a text element that
-        # matches.
-        if hasattr(markup, "__iter__") \
-                and not isinstance(markup, Tag):
-            for element in markup:
-                if isinstance(element, NavigableString) \
-                       and self.search(element):
-                    found = element
-                    break
-        # If it's a Tag, make sure its name or attributes match.
-        # Don't bother with Tags if we're searching for text.
-        elif isinstance(markup, Tag):
-            if not self.text:
-                found = self.searchTag(markup)
-        # If it's text, make sure the text matches.
-        elif isinstance(markup, NavigableString) or \
-                 isinstance(markup, basestring):
-            if self._matches(markup, self.text):
-                found = markup
-        else:
-            raise Exception, "I don't know how to match against a %s" \
-                  % markup.__class__
-        return found
-
-    def _matches(self, markup, matchAgainst):
-        #print "Matching %s against %s" % (markup, matchAgainst)
-        result = False
-        if matchAgainst is True:
-            result = markup is not None
-        elif callable(matchAgainst):
-            result = matchAgainst(markup)
-        else:
-            #Custom match methods take the tag as an argument, but all
-            #other ways of matching match the tag name as a string.
-            if isinstance(markup, Tag):
-                markup = markup.name
-            if markup and not isinstance(markup, basestring):
-                markup = unicode(markup)
-            #Now we know that chunk is either a string, or None.
-            if hasattr(matchAgainst, 'match'):
-                # It's a regexp object.
-                result = markup and matchAgainst.search(markup)
-            elif hasattr(matchAgainst, '__iter__'): # list-like
-                result = markup in matchAgainst
-            elif hasattr(matchAgainst, 'items'):
-                result = markup.has_key(matchAgainst)
-            elif matchAgainst and isinstance(markup, basestring):
-                if isinstance(markup, unicode):
-                    matchAgainst = unicode(matchAgainst)
-                else:
-                    matchAgainst = str(matchAgainst)
-
-            if not result:
-                result = matchAgainst == markup
-        return result
-
-class ResultSet(list):
-    """A ResultSet is just a list that keeps track of the SoupStrainer
-    that created it."""
-    def __init__(self, source):
-        list.__init__([])
-        self.source = source
-
-# Now, some helper functions.
-
-def buildTagMap(default, *args):
-    """Turns a list of maps, lists, or scalars into a single map.
-    Used to build the SELF_CLOSING_TAGS, NESTABLE_TAGS, and
-    NESTING_RESET_TAGS maps out of lists and partial maps."""
-    built = {}
-    for portion in args:
-        if hasattr(portion, 'items'):
-            #It's a map. Merge it.
-            for k,v in portion.items():
-                built[k] = v
-        elif hasattr(portion, '__iter__'): # is a list
-            #It's a list. Map each item to the default.
-            for k in portion:
-                built[k] = default
-        else:
-            #It's a scalar. Map it to the default.
-            built[portion] = default
-    return built
-
-# Now, the parser classes.
-
-class BeautifulStoneSoup(Tag, SGMLParser):
-
-    """This class contains the basic parser and search code. It defines
-    a parser that knows nothing about tag behavior except for the
-    following:
-
-      You can't close a tag without closing all the tags it encloses.
-      That is, "<foo><bar></foo>" actually means
-      "<foo><bar></bar></foo>".
-
-    [Another possible explanation is "<foo><bar /></foo>", but since
-    this class defines no SELF_CLOSING_TAGS, it will never use that
-    explanation.]
-
-    This class is useful for parsing XML or made-up markup languages,
-    or when BeautifulSoup makes an assumption counter to what you were
-    expecting."""
-
-    SELF_CLOSING_TAGS = {}
-    NESTABLE_TAGS = {}
-    RESET_NESTING_TAGS = {}
-    QUOTE_TAGS = {}
-    PRESERVE_WHITESPACE_TAGS = []
-
-    MARKUP_MASSAGE = [(re.compile('(<[^<>]*)/>'),
-                       lambda x: x.group(1) + ' />'),
-                      (re.compile('<!\s+([^<>]*)>'),
-                       lambda x: '<!' + x.group(1) + '>')
-                      ]
-
-    ROOT_TAG_NAME = u'[document]'
-
-    HTML_ENTITIES = "html"
-    XML_ENTITIES = "xml"
-    XHTML_ENTITIES = "xhtml"
-    # TODO: This only exists for backwards-compatibility
-    ALL_ENTITIES = XHTML_ENTITIES
-
-    # Used when determining whether a text node is all whitespace and
-    # can be replaced with a single space. A text node that contains
-    # fancy Unicode spaces (usually non-breaking) should be left
-    # alone.
-    STRIP_ASCII_SPACES = { 9: None, 10: None, 12: None, 13: None, 32: None, }
-
-    def __init__(self, markup="", parseOnlyThese=None, fromEncoding=None,
-                 markupMassage=True, smartQuotesTo=XML_ENTITIES,
-                 convertEntities=None, selfClosingTags=None, isHTML=False):
-        """The Soup object is initialized as the 'root tag', and the
-        provided markup (which can be a string or a file-like object)
-        is fed into the underlying parser.
-
-        sgmllib will process most bad HTML, and the BeautifulSoup
-        class has some tricks for dealing with some HTML that kills
-        sgmllib, but Beautiful Soup can nonetheless choke or lose data
-        if your data uses self-closing tags or declarations
-        incorrectly.
-
-        By default, Beautiful Soup uses regexes to sanitize input,
-        avoiding the vast majority of these problems. If the problems
-        don't apply to you, pass in False for markupMassage, and
-        you'll get better performance.
-
-        The default parser massage techniques fix the two most common
-        instances of invalid HTML that choke sgmllib:
-
-         <br/> (No space between name of closing tag and tag close)
-         <! --Comment--> (Extraneous whitespace in declaration)
-
-        You can pass in a custom list of (RE object, replace method)
-        tuples to get Beautiful Soup to scrub your input the way you
-        want."""
-
-        self.parseOnlyThese = parseOnlyThese
-        self.fromEncoding = fromEncoding
-        self.smartQuotesTo = smartQuotesTo
-        self.convertEntities = convertEntities
-        # Set the rules for how we'll deal with the entities we
-        # encounter
-        if self.convertEntities:
-            # It doesn't make sense to convert encoded characters to
-            # entities even while you're converting entities to Unicode.
-            # Just convert it all to Unicode.
-            self.smartQuotesTo = None
-            if convertEntities == self.HTML_ENTITIES:
-                self.convertXMLEntities = False
-                self.convertHTMLEntities = True
-                self.escapeUnrecognizedEntities = True
-            elif convertEntities == self.XHTML_ENTITIES:
-                self.convertXMLEntities = True
-                self.convertHTMLEntities = True
-                self.escapeUnrecognizedEntities = False
-            elif convertEntities == self.XML_ENTITIES:
-                self.convertXMLEntities = True
-                self.convertHTMLEntities = False
-                self.escapeUnrecognizedEntities = False
-        else:
-            self.convertXMLEntities = False
-            self.convertHTMLEntities = False
-            self.escapeUnrecognizedEntities = False
-
-        self.instanceSelfClosingTags = buildTagMap(None, selfClosingTags)
-        SGMLParser.__init__(self)
-
-        if hasattr(markup, 'read'):        # It's a file-type object.
-            markup = markup.read()
-        self.markup = markup
-        self.markupMassage = markupMassage
-        try:
-            self._feed(isHTML=isHTML)
-        except StopParsing:
-            pass
-        self.markup = None                 # The markup can now be GCed
-
-    def convert_charref(self, name):
-        """This method fixes a bug in Python's SGMLParser."""
-        try:
-            n = int(name)
-        except ValueError:
-            return
-        if not 0 <= n <= 127 : # ASCII ends at 127, not 255
-            return
-        return self.convert_codepoint(n)
-
-    def _feed(self, inDocumentEncoding=None, isHTML=False):
-        # Convert the document to Unicode.
-        markup = self.markup
-        if isinstance(markup, unicode):
-            if not hasattr(self, 'originalEncoding'):
-                self.originalEncoding = None
-        else:
-            dammit = UnicodeDammit\
-                     (markup, [self.fromEncoding, inDocumentEncoding],
-                      smartQuotesTo=self.smartQuotesTo, isHTML=isHTML)
-            markup = dammit.unicode
-            self.originalEncoding = dammit.originalEncoding
-            self.declaredHTMLEncoding = dammit.declaredHTMLEncoding
-        if markup:
-            if self.markupMassage:
-                if not hasattr(self.markupMassage, "__iter__"):
-                    self.markupMassage = self.MARKUP_MASSAGE
-                for fix, m in self.markupMassage:
-                    markup = fix.sub(m, markup)
-                # TODO: We get rid of markupMassage so that the
-                # soup object can be deepcopied later on. Some
-                # Python installations can't copy regexes. If anyone
-                # was relying on the existence of markupMassage, this
-                # might cause problems.
-                del(self.markupMassage)
-        self.reset()
-
-        SGMLParser.feed(self, markup)
-        # Close out any unfinished strings and close all the open tags.
-        self.endData()
-        while self.currentTag.name != self.ROOT_TAG_NAME:
-            self.popTag()
-
-    def __getattr__(self, methodName):
-        """This method routes method call requests to either the SGMLParser
-        superclass or the Tag superclass, depending on the method name."""
-        #print "__getattr__ called on %s.%s" % (self.__class__, methodName)
-
-        if methodName.startswith('start_') or methodName.startswith('end_') \
-               or methodName.startswith('do_'):
-            return SGMLParser.__getattr__(self, methodName)
-        elif not methodName.startswith('__'):
-            return Tag.__getattr__(self, methodName)
-        else:
-            raise AttributeError
-
-    def isSelfClosingTag(self, name):
-        """Returns true iff the given string is the name of a
-        self-closing tag according to this parser."""
-        return self.SELF_CLOSING_TAGS.has_key(name) \
-               or self.instanceSelfClosingTags.has_key(name)
-
-    def reset(self):
-        Tag.__init__(self, self, self.ROOT_TAG_NAME)
-        self.hidden = 1
-        SGMLParser.reset(self)
-        self.currentData = []
-        self.currentTag = None
-        self.tagStack = []
-        self.quoteStack = []
-        self.pushTag(self)
-
-    def popTag(self):
-        tag = self.tagStack.pop()
-
-        #print "Pop", tag.name
-        if self.tagStack:
-            self.currentTag = self.tagStack[-1]
-        return self.currentTag
-
-    def pushTag(self, tag):
-        #print "Push", tag.name
-        if self.currentTag:
-            self.currentTag.contents.append(tag)
-        self.tagStack.append(tag)
-        self.currentTag = self.tagStack[-1]
-
-    def endData(self, containerClass=NavigableString):
-        if self.currentData:
-            currentData = u''.join(self.currentData)
-            if (currentData.translate(self.STRIP_ASCII_SPACES) == '' and
-                not set([tag.name for tag in self.tagStack]).intersection(
-                    self.PRESERVE_WHITESPACE_TAGS)):
-                if '\n' in currentData:
-                    currentData = '\n'
-                else:
-                    currentData = ' '
-            self.currentData = []
-            if self.parseOnlyThese and len(self.tagStack) <= 1 and \
-                   (not self.parseOnlyThese.text or \
-                    not self.parseOnlyThese.search(currentData)):
-                return
-            o = containerClass(currentData)
-            o.setup(self.currentTag, self.previous)
-            if self.previous:
-                self.previous.next = o
-            self.previous = o
-            self.currentTag.contents.append(o)
-
-
-    def _popToTag(self, name, inclusivePop=True):
-        """Pops the tag stack up to and including the most recent
-        instance of the given tag. If inclusivePop is false, pops the tag
If inclusivePop is false, pops the tag - stack up to but *not* including the most recent instqance of - the given tag.""" - #print "Popping to %s" % name - if name == self.ROOT_TAG_NAME: - return - - numPops = 0 - mostRecentTag = None - for i in range(len(self.tagStack)-1, 0, -1): - if name == self.tagStack[i].name: - numPops = len(self.tagStack)-i - break - if not inclusivePop: - numPops = numPops - 1 - - for i in range(0, numPops): - mostRecentTag = self.popTag() - return mostRecentTag - - def _smartPop(self, name): - - """We need to pop up to the previous tag of this type, unless - one of this tag's nesting reset triggers comes between this - tag and the previous tag of this type, OR unless this tag is a - generic nesting trigger and another generic nesting trigger - comes between this tag and the previous tag of this type. - - Examples: - <p>Foo<b>Bar *<p>* should pop to 'p', not 'b'. - <p>Foo<table>Bar *<p>* should pop to 'table', not 'p'. - <p>Foo<table><tr>Bar *<p>* should pop to 'tr', not 'p'. - - <li><ul><li> *<li>* should pop to 'ul', not the first 'li'. - <tr><table><tr> *<tr>* should pop to 'table', not the first 'tr' - <td><tr><td> *<td>* should pop to 'tr', not the first 'td' - """ - - nestingResetTriggers = self.NESTABLE_TAGS.get(name) - isNestable = nestingResetTriggers is not None - isResetNesting = self.RESET_NESTING_TAGS.has_key(name) - popTo = None - inclusive = True - for i in range(len(self.tagStack)-1, 0, -1): - p = self.tagStack[i] - if (not p or p.name == name) and not isNestable: - #Non-nestable tags get popped to the top or to their - #last occurance. - popTo = name - break - if (nestingResetTriggers is not None - and p.name in nestingResetTriggers) \ - or (nestingResetTriggers is None and isResetNesting - and self.RESET_NESTING_TAGS.has_key(p.name)): - - #If we encounter one of the nesting reset triggers - #peculiar to this tag, or we encounter another tag - #that causes nesting to reset, pop up to but not - #including that tag. - popTo = p.name - inclusive = False - break - p = p.parent - if popTo: - self._popToTag(popTo, inclusive) - - def unknown_starttag(self, name, attrs, selfClosing=0): - #print "Start tag %s: %s" % (name, attrs) - if self.quoteStack: - #This is not a real tag. - #print "<%s> is not real!" % name - attrs = ''.join([' %s="%s"' % (x, y) for x, y in attrs]) - self.handle_data('<%s%s>' % (name, attrs)) - return - self.endData() - - if not self.isSelfClosingTag(name) and not selfClosing: - self._smartPop(name) - - if self.parseOnlyThese and len(self.tagStack) <= 1 \ - and (self.parseOnlyThese.text or not self.parseOnlyThese.searchTag(name, attrs)): - return - - tag = Tag(self, name, attrs, self.currentTag, self.previous) - if self.previous: - self.previous.next = tag - self.previous = tag - self.pushTag(tag) - if selfClosing or self.isSelfClosingTag(name): - self.popTag() - if name in self.QUOTE_TAGS: - #print "Beginning quote (%s)" % name - self.quoteStack.append(name) - self.literal = 1 - return tag - - def unknown_endtag(self, name): - #print "End tag %s" % name - if self.quoteStack and self.quoteStack[-1] != name: - #This is not a real end tag. - #print "</%s> is not real!" 
-            self.handle_data('</%s>' % name)
-            return
-        self.endData()
-        self._popToTag(name)
-        if self.quoteStack and self.quoteStack[-1] == name:
-            self.quoteStack.pop()
-            self.literal = (len(self.quoteStack) > 0)
-
-    def handle_data(self, data):
-        self.currentData.append(data)
-
-    def _toStringSubclass(self, text, subclass):
-        """Adds a certain piece of text to the tree as a NavigableString
-        subclass."""
-        self.endData()
-        self.handle_data(text)
-        self.endData(subclass)
-
-    def handle_pi(self, text):
-        """Handle a processing instruction as a ProcessingInstruction
-        object, possibly one with a %SOUP-ENCODING% slot into which an
-        encoding will be plugged later."""
-        if text[:3] == "xml":
-            text = u"xml version='1.0' encoding='%SOUP-ENCODING%'"
-        self._toStringSubclass(text, ProcessingInstruction)
-
-    def handle_comment(self, text):
-        "Handle comments as Comment objects."
-        self._toStringSubclass(text, Comment)
-
-    def handle_charref(self, ref):
-        "Handle character references as data."
-        if self.convertEntities:
-            data = unichr(int(ref))
-        else:
-            data = '&#%s;' % ref
-        self.handle_data(data)
-
-    def handle_entityref(self, ref):
-        """Handle entity references as data, possibly converting known
-        HTML and/or XML entity references to the corresponding Unicode
-        characters."""
-        data = None
-        if self.convertHTMLEntities:
-            try:
-                data = unichr(name2codepoint[ref])
-            except KeyError:
-                pass
-
-        if not data and self.convertXMLEntities:
-            data = self.XML_ENTITIES_TO_SPECIAL_CHARS.get(ref)
-
-        if not data and self.convertHTMLEntities and \
-            not self.XML_ENTITIES_TO_SPECIAL_CHARS.get(ref):
-            # TODO: We've got a problem here. We're told this is
-            # an entity reference, but it's not an XML entity
-            # reference or an HTML entity reference. Nonetheless,
-            # the logical thing to do is to pass it through as an
-            # unrecognized entity reference.
-            #
-            # Except: when the input is "&carol;" this function
-            # will be called with input "carol". When the input is
-            # "AT&T", this function will be called with input
-            # "T". We have no way of knowing whether a semicolon
-            # was present originally, so we don't know whether
-            # this is an unknown entity or just a misplaced
-            # ampersand.
-            #
-            # The more common case is a misplaced ampersand, so I
-            # escape the ampersand and omit the trailing semicolon.
-            data = "&amp;%s" % ref
-        if not data:
-            # This case is different from the one above, because we
-            # haven't already gone through a supposedly comprehensive
-            # mapping of entities to Unicode characters. We might not
-            # have gone through any mapping at all. So the chances are
-            # very high that this is a real entity, and not a
-            # misplaced ampersand.
-            data = "&%s;" % ref
-        self.handle_data(data)
-
-    def handle_decl(self, data):
-        "Handle DOCTYPEs and the like as Declaration objects."
-        self._toStringSubclass(data, Declaration)
-
-    def parse_declaration(self, i):
-        """Treat a bogus SGML declaration as raw data. Treat a CDATA
Treat a CDATA - declaration as a CData object.""" - j = None - if self.rawdata[i:i+9] == '<![CDATA[': - k = self.rawdata.find(']]>', i) - if k == -1: - k = len(self.rawdata) - data = self.rawdata[i+9:k] - j = k+3 - self._toStringSubclass(data, CData) - else: - try: - j = SGMLParser.parse_declaration(self, i) - except SGMLParseError: - toHandle = self.rawdata[i:] - self.handle_data(toHandle) - j = i + len(toHandle) - return j - -class BeautifulSoup(BeautifulStoneSoup): - - """This parser knows the following facts about HTML: - - * Some tags have no closing tag and should be interpreted as being - closed as soon as they are encountered. - - * The text inside some tags (ie. 'script') may contain tags which - are not really part of the document and which should be parsed - as text, not tags. If you want to parse the text as tags, you can - always fetch it and parse it explicitly. - - * Tag nesting rules: - - Most tags can't be nested at all. For instance, the occurance of - a <p> tag should implicitly close the previous <p> tag. - - <p>Para1<p>Para2 - should be transformed into: - <p>Para1</p><p>Para2 - - Some tags can be nested arbitrarily. For instance, the occurance - of a <blockquote> tag should _not_ implicitly close the previous - <blockquote> tag. - - Alice said: <blockquote>Bob said: <blockquote>Blah - should NOT be transformed into: - Alice said: <blockquote>Bob said: </blockquote><blockquote>Blah - - Some tags can be nested, but the nesting is reset by the - interposition of other tags. For instance, a <tr> tag should - implicitly close the previous <tr> tag within the same <table>, - but not close a <tr> tag in another table. - - <table><tr>Blah<tr>Blah - should be transformed into: - <table><tr>Blah</tr><tr>Blah - but, - <tr>Blah<table><tr>Blah - should NOT be transformed into - <tr>Blah<table></tr><tr>Blah - - Differing assumptions about tag nesting rules are a major source - of problems with the BeautifulSoup class. If BeautifulSoup is not - treating as nestable a tag your page author treats as nestable, - try ICantBelieveItsBeautifulSoup, MinimalSoup, or - BeautifulStoneSoup before writing your own subclass.""" - - def __init__(self, *args, **kwargs): - if not kwargs.has_key('smartQuotesTo'): - kwargs['smartQuotesTo'] = self.HTML_ENTITIES - kwargs['isHTML'] = True - BeautifulStoneSoup.__init__(self, *args, **kwargs) - - SELF_CLOSING_TAGS = buildTagMap(None, - ('br' , 'hr', 'input', 'img', 'meta', - 'spacer', 'link', 'frame', 'base', 'col')) - - PRESERVE_WHITESPACE_TAGS = set(['pre', 'textarea']) - - QUOTE_TAGS = {'script' : None, 'textarea' : None} - - #According to the HTML standard, each of these inline tags can - #contain another tag of the same type. Furthermore, it's common - #to actually use these tags this way. - NESTABLE_INLINE_TAGS = ('span', 'font', 'q', 'object', 'bdo', 'sub', 'sup', - 'center') - - #According to the HTML standard, these block tags can contain - #another tag of the same type. Furthermore, it's common - #to actually use these tags this way. - NESTABLE_BLOCK_TAGS = ('blockquote', 'div', 'fieldset', 'ins', 'del') - - #Lists can contain other lists, but there are restrictions. - NESTABLE_LIST_TAGS = { 'ol' : [], - 'ul' : [], - 'li' : ['ul', 'ol'], - 'dl' : [], - 'dd' : ['dl'], - 'dt' : ['dl'] } - - #Tables can contain other tables, but there are restrictions. 
-    NESTABLE_TABLE_TAGS = {'table' : [],
-                           'tr' : ['table', 'tbody', 'tfoot', 'thead'],
-                           'td' : ['tr'],
-                           'th' : ['tr'],
-                           'thead' : ['table'],
-                           'tbody' : ['table'],
-                           'tfoot' : ['table'],
-                           }
-
-    NON_NESTABLE_BLOCK_TAGS = ('address', 'form', 'p', 'pre')
-
-    #If one of these tags is encountered, all tags up to the next tag of
-    #this type are popped.
-    RESET_NESTING_TAGS = buildTagMap(None, NESTABLE_BLOCK_TAGS, 'noscript',
-                                     NON_NESTABLE_BLOCK_TAGS,
-                                     NESTABLE_LIST_TAGS,
-                                     NESTABLE_TABLE_TAGS)
-
-    NESTABLE_TAGS = buildTagMap([], NESTABLE_INLINE_TAGS, NESTABLE_BLOCK_TAGS,
-                                NESTABLE_LIST_TAGS, NESTABLE_TABLE_TAGS)
-
-    # Used to detect the charset in a META tag; see start_meta
-    CHARSET_RE = re.compile("((^|;)\s*charset=)([^;]*)", re.M)
-
-    def start_meta(self, attrs):
-        """Beautiful Soup can detect a charset included in a META tag,
-        try to convert the document to that charset, and re-parse the
-        document from the beginning."""
-        httpEquiv = None
-        contentType = None
-        contentTypeIndex = None
-        tagNeedsEncodingSubstitution = False
-
-        for i in range(0, len(attrs)):
-            key, value = attrs[i]
-            key = key.lower()
-            if key == 'http-equiv':
-                httpEquiv = value
-            elif key == 'content':
-                contentType = value
-                contentTypeIndex = i
-
-        if httpEquiv and contentType: # It's an interesting meta tag.
-            match = self.CHARSET_RE.search(contentType)
-            if match:
-                if (self.declaredHTMLEncoding is not None or
-                    self.originalEncoding == self.fromEncoding):
-                    # An HTML encoding was sniffed while converting
-                    # the document to Unicode, or an HTML encoding was
-                    # sniffed during a previous pass through the
-                    # document, or an encoding was specified
-                    # explicitly and it worked. Rewrite the meta tag.
-                    def rewrite(match):
-                        return match.group(1) + "%SOUP-ENCODING%"
-                    newAttr = self.CHARSET_RE.sub(rewrite, contentType)
-                    attrs[contentTypeIndex] = (attrs[contentTypeIndex][0],
-                                               newAttr)
-                    tagNeedsEncodingSubstitution = True
-                else:
-                    # This is our first pass through the document.
-                    # Go through it again with the encoding information.
-                    newCharset = match.group(3)
-                    if newCharset and newCharset != self.originalEncoding:
-                        self.declaredHTMLEncoding = newCharset
-                        self._feed(self.declaredHTMLEncoding)
-                        raise StopParsing
-                pass
-        tag = self.unknown_starttag("meta", attrs)
-        if tag and tagNeedsEncodingSubstitution:
-            tag.containsSubstitutions = True
-
-class StopParsing(Exception):
-    pass
-
-class ICantBelieveItsBeautifulSoup(BeautifulSoup):
-
-    """The BeautifulSoup class is oriented towards skipping over
-    common HTML errors like unclosed tags. However, sometimes it makes
-    errors of its own. For instance, consider this fragment:
-
-     <b>Foo<b>Bar</b></b>
-
-    This is perfectly valid (if bizarre) HTML. However, the
-    BeautifulSoup class will implicitly close the first b tag when it
-    encounters the second 'b'. It will think the author wrote
-    "<b>Foo<b>Bar", and didn't close the first 'b' tag, because
-    there's no real-world reason to bold something that's already
-    bold. When it encounters '</b></b>' it will close two more 'b'
-    tags, for a grand total of three tags closed instead of two. This
-    can throw off the rest of your document structure. The same is
-    true of a number of other tags, listed below.
-
-    It's much more common for someone to forget to close a 'b' tag
-    than to actually use nested 'b' tags, and the BeautifulSoup class
-    handles the common case. This class handles the not-so-common
-    case: where you can't believe someone wrote what they did, but
-    it's valid HTML and BeautifulSoup screwed up by assuming it
-    wouldn't be."""
-
-    I_CANT_BELIEVE_THEYRE_NESTABLE_INLINE_TAGS = \
-     ('em', 'big', 'i', 'small', 'tt', 'abbr', 'acronym', 'strong',
-      'cite', 'code', 'dfn', 'kbd', 'samp', 'strong', 'var', 'b',
-      'big')
-
-    I_CANT_BELIEVE_THEYRE_NESTABLE_BLOCK_TAGS = ('noscript',)
-
-    NESTABLE_TAGS = buildTagMap([], BeautifulSoup.NESTABLE_TAGS,
-                                I_CANT_BELIEVE_THEYRE_NESTABLE_BLOCK_TAGS,
-                                I_CANT_BELIEVE_THEYRE_NESTABLE_INLINE_TAGS)
-
-class MinimalSoup(BeautifulSoup):
-    """The MinimalSoup class is for parsing HTML that contains
-    pathologically bad markup. It makes no assumptions about tag
-    nesting, but it does know which tags are self-closing, that
-    <script> tags contain Javascript and should not be parsed, that
-    META tags may contain encoding information, and so on.
-
-    This also makes it better for subclassing than BeautifulStoneSoup
-    or BeautifulSoup."""
-
-    RESET_NESTING_TAGS = buildTagMap('noscript')
-    NESTABLE_TAGS = {}
-
-class BeautifulSOAP(BeautifulStoneSoup):
-    """This class will push a tag with only a single string child into
-    the tag's parent as an attribute. The attribute's name is the tag
-    name, and the value is the string child. An example should give
-    the flavor of the change:
-
-    <foo><bar>baz</bar></foo>
-     =>
-    <foo bar="baz"><bar>baz</bar></foo>
-
-    You can then access fooTag['bar'] instead of fooTag.barTag.string.
-
-    This is, of course, useful for scraping structures that tend to
-    use subelements instead of attributes, such as SOAP messages. Note
-    that it modifies its input, so don't print the modified version
-    out.
-
-    I'm not sure how many people really want to use this class; let me
-    know if you do. Mainly I like the name."""
-
-    def popTag(self):
-        if len(self.tagStack) > 1:
-            tag = self.tagStack[-1]
-            parent = self.tagStack[-2]
-            parent._getAttrMap()
-            if (isinstance(tag, Tag) and len(tag.contents) == 1 and
-                isinstance(tag.contents[0], NavigableString) and
-                not parent.attrMap.has_key(tag.name)):
-                parent[tag.name] = tag.contents[0]
-        BeautifulStoneSoup.popTag(self)
-
-#Enterprise class names! It has come to our attention that some people
-#think the names of the Beautiful Soup parser classes are too silly
-#and "unprofessional" for use in enterprise screen-scraping. We feel
-#your pain! For such-minded folk, the Beautiful Soup Consortium And
-#All-Night Kosher Bakery recommends renaming this file to
-#"RobustParser.py" (or, in cases of extreme enterprisiness,
-#"RobustParserBeanInterface.class") and using the following
-#enterprise-friendly class aliases:
-class RobustXMLParser(BeautifulStoneSoup):
-    pass
-class RobustHTMLParser(BeautifulSoup):
-    pass
-class RobustWackAssHTMLParser(ICantBelieveItsBeautifulSoup):
-    pass
-class RobustInsanelyWackAssHTMLParser(MinimalSoup):
-    pass
-class SimplifyingSOAPParser(BeautifulSOAP):
-    pass
-
-######################################################
-#
-# Bonus library: Unicode, Dammit
-#
-# This class forces XML data into a standard format (usually to UTF-8
-# or Unicode). It is heavily based on code from Mark Pilgrim's
-# Universal Feed Parser. It does not rewrite the XML or HTML to
-# reflect a new encoding: that happens in BeautifulStoneSoup.handle_pi
-# (XML) and BeautifulSoup.start_meta (HTML).
-
-# Autodetects character encodings.
-# Download from http://chardet.feedparser.org/
-try:
-    import chardet
-#    import chardet.constants
-#    chardet.constants._debug = 1
-except ImportError:
-    chardet = None
-
-# cjkcodecs and iconv_codec make Python know about more character encodings.
-# Both are available from http://cjkpython.i18n.org/
-# They're built in if you use Python 2.4.
-try:
-    import cjkcodecs.aliases
-except ImportError:
-    pass
-try:
-    import iconv_codec
-except ImportError:
-    pass
-
-class UnicodeDammit:
-    """A class for detecting the encoding of a *ML document and
-    converting it to a Unicode string. If the source encoding is
-    windows-1252, can replace MS smart quotes with their HTML or XML
-    equivalents."""
-
-    # This dictionary maps commonly seen values for "charset" in HTML
-    # meta tags to the corresponding Python codec names. It only covers
-    # values that aren't in Python's aliases and can't be determined
-    # by the heuristics in find_codec.
-    CHARSET_ALIASES = { "macintosh" : "mac-roman",
-                        "x-sjis" : "shift-jis" }
-
-    def __init__(self, markup, overrideEncodings=[],
-                 smartQuotesTo='xml', isHTML=False):
-        self.declaredHTMLEncoding = None
-        self.markup, documentEncoding, sniffedEncoding = \
-                     self._detectEncoding(markup, isHTML)
-        self.smartQuotesTo = smartQuotesTo
-        self.triedEncodings = []
-        if markup == '' or isinstance(markup, unicode):
-            self.originalEncoding = None
-            self.unicode = unicode(markup)
-            return
-
-        u = None
-        for proposedEncoding in overrideEncodings:
-            u = self._convertFrom(proposedEncoding)
-            if u: break
-        if not u:
-            for proposedEncoding in (documentEncoding, sniffedEncoding):
-                u = self._convertFrom(proposedEncoding)
-                if u: break
-
-        # If no luck and we have auto-detection library, try that:
-        if not u and chardet and not isinstance(self.markup, unicode):
-            u = self._convertFrom(chardet.detect(self.markup)['encoding'])
-
-        # As a last resort, try utf-8 and windows-1252:
-        if not u:
-            for proposed_encoding in ("utf-8", "windows-1252"):
-                u = self._convertFrom(proposed_encoding)
-                if u: break
-
-        self.unicode = u
-        if not u: self.originalEncoding = None
-
-    def _subMSChar(self, orig):
-        """Changes a MS smart quote character to an XML or HTML
-        entity."""
-        sub = self.MS_CHARS.get(orig)
-        if isinstance(sub, tuple):
-            if self.smartQuotesTo == 'xml':
-                sub = '&#x%s;' % sub[1]
-            else:
-                sub = '&%s;' % sub[0]
-        return sub
-
-    def _convertFrom(self, proposed):
-        proposed = self.find_codec(proposed)
-        if not proposed or proposed in self.triedEncodings:
-            return None
-        self.triedEncodings.append(proposed)
-        markup = self.markup
-
-        # Convert smart quotes to HTML if coming from an encoding
-        # that might have them.
-        if self.smartQuotesTo and proposed.lower() in("windows-1252",
-                                                      "iso-8859-1",
-                                                      "iso-8859-2"):
-            markup = re.compile("([\x80-\x9f])").sub \
-                     (lambda(x): self._subMSChar(x.group(1)),
-                      markup)
-
-        try:
-            # print "Trying to convert document to %s" % proposed
-            u = self._toUnicode(markup, proposed)
-            self.markup = u
-            self.originalEncoding = proposed
-        except Exception, e:
-            # print "That didn't work!"
-            # print e
-            return None
-        #print "Correct encoding: %s" % proposed
-        return self.markup
-
-    def _toUnicode(self, data, encoding):
-        '''Given a string and its encoding, decodes the string into Unicode.
- %encoding is a string recognized by encodings.aliases''' - - # strip Byte Order Mark (if present) - if (len(data) >= 4) and (data[:2] == '\xfe\xff') \ - and (data[2:4] != '\x00\x00'): - encoding = 'utf-16be' - data = data[2:] - elif (len(data) >= 4) and (data[:2] == '\xff\xfe') \ - and (data[2:4] != '\x00\x00'): - encoding = 'utf-16le' - data = data[2:] - elif data[:3] == '\xef\xbb\xbf': - encoding = 'utf-8' - data = data[3:] - elif data[:4] == '\x00\x00\xfe\xff': - encoding = 'utf-32be' - data = data[4:] - elif data[:4] == '\xff\xfe\x00\x00': - encoding = 'utf-32le' - data = data[4:] - newdata = unicode(data, encoding) - return newdata - - def _detectEncoding(self, xml_data, isHTML=False): - """Given a document, tries to detect its XML encoding.""" - xml_encoding = sniffed_xml_encoding = None - try: - if xml_data[:4] == '\x4c\x6f\xa7\x94': - # EBCDIC - xml_data = self._ebcdic_to_ascii(xml_data) - elif xml_data[:4] == '\x00\x3c\x00\x3f': - # UTF-16BE - sniffed_xml_encoding = 'utf-16be' - xml_data = unicode(xml_data, 'utf-16be').encode('utf-8') - elif (len(xml_data) >= 4) and (xml_data[:2] == '\xfe\xff') \ - and (xml_data[2:4] != '\x00\x00'): - # UTF-16BE with BOM - sniffed_xml_encoding = 'utf-16be' - xml_data = unicode(xml_data[2:], 'utf-16be').encode('utf-8') - elif xml_data[:4] == '\x3c\x00\x3f\x00': - # UTF-16LE - sniffed_xml_encoding = 'utf-16le' - xml_data = unicode(xml_data, 'utf-16le').encode('utf-8') - elif (len(xml_data) >= 4) and (xml_data[:2] == '\xff\xfe') and \ - (xml_data[2:4] != '\x00\x00'): - # UTF-16LE with BOM - sniffed_xml_encoding = 'utf-16le' - xml_data = unicode(xml_data[2:], 'utf-16le').encode('utf-8') - elif xml_data[:4] == '\x00\x00\x00\x3c': - # UTF-32BE - sniffed_xml_encoding = 'utf-32be' - xml_data = unicode(xml_data, 'utf-32be').encode('utf-8') - elif xml_data[:4] == '\x3c\x00\x00\x00': - # UTF-32LE - sniffed_xml_encoding = 'utf-32le' - xml_data = unicode(xml_data, 'utf-32le').encode('utf-8') - elif xml_data[:4] == '\x00\x00\xfe\xff': - # UTF-32BE with BOM - sniffed_xml_encoding = 'utf-32be' - xml_data = unicode(xml_data[4:], 'utf-32be').encode('utf-8') - elif xml_data[:4] == '\xff\xfe\x00\x00': - # UTF-32LE with BOM - sniffed_xml_encoding = 'utf-32le' - xml_data = unicode(xml_data[4:], 'utf-32le').encode('utf-8') - elif xml_data[:3] == '\xef\xbb\xbf': - # UTF-8 with BOM - sniffed_xml_encoding = 'utf-8' - xml_data = unicode(xml_data[3:], 'utf-8').encode('utf-8') - else: - sniffed_xml_encoding = 'ascii' - pass - except: - xml_encoding_match = None - xml_encoding_match = re.compile( - '^<\?.*encoding=[\'"](.*?)[\'"].*\?>').match(xml_data) - if not xml_encoding_match and isHTML: - regexp = re.compile('<\s*meta[^>]+charset=([^>]*?)[;\'">]', re.I) - xml_encoding_match = regexp.search(xml_data) - if xml_encoding_match is not None: - xml_encoding = xml_encoding_match.groups()[0].lower() - if isHTML: - self.declaredHTMLEncoding = xml_encoding - if sniffed_xml_encoding and \ - (xml_encoding in ('iso-10646-ucs-2', 'ucs-2', 'csunicode', - 'iso-10646-ucs-4', 'ucs-4', 'csucs4', - 'utf-16', 'utf-32', 'utf_16', 'utf_32', - 'utf16', 'u16')): - xml_encoding = sniffed_xml_encoding - return xml_data, xml_encoding, sniffed_xml_encoding - - - def find_codec(self, charset): - return self._codec(self.CHARSET_ALIASES.get(charset, charset)) \ - or (charset and self._codec(charset.replace("-", ""))) \ - or (charset and self._codec(charset.replace("-", "_"))) \ - or charset - - def _codec(self, charset): - if not charset: return charset - codec = None - try: - codecs.lookup(charset) - 
codec = charset - except (LookupError, ValueError): - pass - return codec - - EBCDIC_TO_ASCII_MAP = None - def _ebcdic_to_ascii(self, s): - c = self.__class__ - if not c.EBCDIC_TO_ASCII_MAP: - emap = (0,1,2,3,156,9,134,127,151,141,142,11,12,13,14,15, - 16,17,18,19,157,133,8,135,24,25,146,143,28,29,30,31, - 128,129,130,131,132,10,23,27,136,137,138,139,140,5,6,7, - 144,145,22,147,148,149,150,4,152,153,154,155,20,21,158,26, - 32,160,161,162,163,164,165,166,167,168,91,46,60,40,43,33, - 38,169,170,171,172,173,174,175,176,177,93,36,42,41,59,94, - 45,47,178,179,180,181,182,183,184,185,124,44,37,95,62,63, - 186,187,188,189,190,191,192,193,194,96,58,35,64,39,61,34, - 195,97,98,99,100,101,102,103,104,105,196,197,198,199,200, - 201,202,106,107,108,109,110,111,112,113,114,203,204,205, - 206,207,208,209,126,115,116,117,118,119,120,121,122,210, - 211,212,213,214,215,216,217,218,219,220,221,222,223,224, - 225,226,227,228,229,230,231,123,65,66,67,68,69,70,71,72, - 73,232,233,234,235,236,237,125,74,75,76,77,78,79,80,81, - 82,238,239,240,241,242,243,92,159,83,84,85,86,87,88,89, - 90,244,245,246,247,248,249,48,49,50,51,52,53,54,55,56,57, - 250,251,252,253,254,255) - import string - c.EBCDIC_TO_ASCII_MAP = string.maketrans( \ - ''.join(map(chr, range(256))), ''.join(map(chr, emap))) - return s.translate(c.EBCDIC_TO_ASCII_MAP) - - MS_CHARS = { '\x80' : ('euro', '20AC'), - '\x81' : ' ', - '\x82' : ('sbquo', '201A'), - '\x83' : ('fnof', '192'), - '\x84' : ('bdquo', '201E'), - '\x85' : ('hellip', '2026'), - '\x86' : ('dagger', '2020'), - '\x87' : ('Dagger', '2021'), - '\x88' : ('circ', '2C6'), - '\x89' : ('permil', '2030'), - '\x8A' : ('Scaron', '160'), - '\x8B' : ('lsaquo', '2039'), - '\x8C' : ('OElig', '152'), - '\x8D' : '?', - '\x8E' : ('#x17D', '17D'), - '\x8F' : '?', - '\x90' : '?', - '\x91' : ('lsquo', '2018'), - '\x92' : ('rsquo', '2019'), - '\x93' : ('ldquo', '201C'), - '\x94' : ('rdquo', '201D'), - '\x95' : ('bull', '2022'), - '\x96' : ('ndash', '2013'), - '\x97' : ('mdash', '2014'), - '\x98' : ('tilde', '2DC'), - '\x99' : ('trade', '2122'), - '\x9a' : ('scaron', '161'), - '\x9b' : ('rsaquo', '203A'), - '\x9c' : ('oelig', '153'), - '\x9d' : '?', - '\x9e' : ('#x17E', '17E'), - '\x9f' : ('Yuml', ''),} - -####################################################################### - - -#By default, act as an HTML pretty-printer. -if __name__ == '__main__': - import sys - soup = BeautifulSoup(sys.stdin) - print soup.prettify() diff --git a/module/lib/MultipartPostHandler.py b/module/lib/MultipartPostHandler.py deleted file mode 100644 index 94aee0193..000000000 --- a/module/lib/MultipartPostHandler.py +++ /dev/null @@ -1,139 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- - -#### -# 02/2006 Will Holcomb <wholcomb@gmail.com> -# -# This library is free software; you can redistribute it and/or -# modify it under the terms of the GNU Lesser General Public -# License as published by the Free Software Foundation; either -# version 2.1 of the License, or (at your option) any later version. -# -# This library is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -# Lesser General Public License for more details. 
-# -# 7/26/07 Slightly modified by Brian Schneider -# in order to support unicode files ( multipart_encode function ) -""" -Usage: - Enables the use of multipart/form-data for posting forms - -Inspirations: - Upload files in python: - http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/146306 - urllib2_file: - Fabien Seisen: <fabien@seisen.org> - -Example: - import MultipartPostHandler, urllib2, cookielib - - cookies = cookielib.CookieJar() - opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cookies), - MultipartPostHandler.MultipartPostHandler) - params = { "username" : "bob", "password" : "riviera", - "file" : open("filename", "rb") } - opener.open("http://wwww.bobsite.com/upload/", params) - -Further Example: - The main function of this file is a sample which downloads a page and - then uploads it to the W3C validator. -""" - -from urllib import urlencode -from urllib2 import BaseHandler, HTTPHandler, build_opener -import mimetools, mimetypes -from os import write, remove -from cStringIO import StringIO - -class Callable: - def __init__(self, anycallable): - self.__call__ = anycallable - -# Controls how sequences are encoded. If true, elements may be given multiple values by -# assigning a sequence. -doseq = 1 - -class MultipartPostHandler(BaseHandler): - handler_order = HTTPHandler.handler_order - 10 # needs to run first - - def http_request(self, request): - data = request.get_data() - if data is not None and type(data) != str: - v_files = [] - v_vars = [] - try: - for(key, value) in data.items(): - if type(value) == file: - v_files.append((key, value)) - else: - v_vars.append((key, value)) - except TypeError: - systype, value, traceback = sys.exc_info() - raise TypeError, "not a valid non-string sequence or mapping object", traceback - - if len(v_files) == 0: - data = urlencode(v_vars, doseq) - else: - boundary, data = self.multipart_encode(v_vars, v_files) - - contenttype = 'multipart/form-data; boundary=%s' % boundary - if(request.has_header('Content-Type') - and request.get_header('Content-Type').find('multipart/form-data') != 0): - print "Replacing %s with %s" % (request.get_header('content-type'), 'multipart/form-data') - request.add_unredirected_header('Content-Type', contenttype) - - request.add_data(data) - - return request - - def multipart_encode(vars, files, boundary = None, buf = None): - if boundary is None: - boundary = mimetools.choose_boundary() - if buf is None: - buf = StringIO() - for(key, value) in vars: - buf.write('--%s\r\n' % boundary) - buf.write('Content-Disposition: form-data; name="%s"' % key) - buf.write('\r\n\r\n' + value + '\r\n') - for(key, fd) in files: - #file_size = os.fstat(fd.fileno())[stat.ST_SIZE] - filename = fd.name.split('/')[-1] - contenttype = mimetypes.guess_type(filename)[0] or 'application/octet-stream' - buf.write('--%s\r\n' % boundary) - buf.write('Content-Disposition: form-data; name="%s"; filename="%s"\r\n' % (key, filename)) - buf.write('Content-Type: %s\r\n' % contenttype) - # buffer += 'Content-Length: %s\r\n' % file_size - fd.seek(0) - buf.write('\r\n' + fd.read() + '\r\n') - buf.write('--' + boundary + '--\r\n\r\n') - buf = buf.getvalue() - return boundary, buf - multipart_encode = Callable(multipart_encode) - - https_request = http_request - -def main(): - import tempfile, sys - - validatorURL = "http://validator.w3.org/check" - opener = build_opener(MultipartPostHandler) - - def validateFile(url): - temp = tempfile.mkstemp(suffix=".html") - write(temp[0], opener.open(url).read()) - params = { "ss" : "0", # show
source - "doctype" : "Inline", - "uploaded_file" : open(temp[1], "rb") } - print opener.open(validatorURL, params).read() - remove(temp[1]) - - if len(sys.argv[1:]) > 0: - for arg in sys.argv[1:]: - validateFile(arg) - else: - validateFile("http://www.google.com") - -if __name__=="__main__": - main() diff --git a/module/lib/ReadWriteLock.py b/module/lib/ReadWriteLock.py new file mode 100644 index 000000000..cc82f3d48 --- /dev/null +++ b/module/lib/ReadWriteLock.py @@ -0,0 +1,232 @@ +# -*- coding: iso-8859-15 -*- +"""locks.py - Read-Write lock thread lock implementation + +See the class documentation for more info. + +Copyright (C) 2007, Heiko Wundram. +Released under the BSD-license. + +http://code.activestate.com/recipes/502283-read-write-lock-class-rlock-like/ +""" + +# Imports + # ------- + +from threading import Condition, Lock, currentThread +from time import time + + +# Read write lock +# --------------- + +class ReadWriteLock(object): + """Read-Write lock class. A read-write lock differs from a standard + threading.RLock() by allowing multiple threads to simultaneously hold a + read lock, while allowing only a single thread to hold a write lock at the + same point of time. + + When a read lock is requested while a write lock is held, the reader + is blocked; when a write lock is requested while another write lock is + held or there are read locks, the writer is blocked. + + Writers are always preferred by this implementation: if there are blocked + threads waiting for a write lock, current readers may request more read + locks (which they eventually should free, as they starve the waiting + writers otherwise), but a new thread requesting a read lock will not + be granted one, and block. This might mean starvation for readers if + two writer threads interweave their calls to acquireWrite() without + leaving a window only for readers. + + In case a current reader requests a write lock, this can and will be + satisfied without giving up the read locks first, but only one thread + may perform this kind of lock upgrade, as a deadlock would otherwise + occur. After the write lock has been granted, the thread will hold a + full write lock, and not be downgraded after the upgrading call to + acquireWrite() has been matched by a corresponding release(). + """ + + def __init__(self): + """Initialize this read-write lock.""" + + # Condition variable, used to signal waiters of a change in object + # state. + self.__condition = Condition(Lock()) + + # Initialize with no writers. + self.__writer = None + self.__upgradewritercount = 0 + self.__pendingwriters = [] + + # Initialize with no readers. + self.__readers = {} + + def acquire(self, blocking=True, timeout=None, shared=False): + if shared: + self.acquireRead(timeout) + else: + self.acquireWrite(timeout) + + def acquireRead(self, timeout=None): + """Acquire a read lock for the current thread, waiting at most + timeout seconds or doing a non-blocking check in case timeout is <= 0. + + In case timeout is None, the call to acquireRead blocks until the + lock request can be serviced. + + In case the timeout expires before the lock could be serviced, a + RuntimeError is thrown.""" + + if timeout is not None: + endtime = time() + timeout + me = currentThread() + self.__condition.acquire() + try: + if self.__writer is me: + # If we are the writer, grant a new read lock, always. + self.__writercount += 1 + return + while True: + if self.__writer is None: + # Only test anything if there is no current writer.
+ if self.__upgradewritercount or self.__pendingwriters: + if me in self.__readers: + # Only grant a read lock if we already have one + # in case writers are waiting for their turn. + # This means that writers can't easily get starved + # (but see below, readers can). + self.__readers[me] += 1 + return + # No, we aren't a reader (yet), wait for our turn. + else: + # Grant a new read lock, always, in case there are + # no pending writers (and no writer). + self.__readers[me] = self.__readers.get(me, 0) + 1 + return + if timeout is not None: + remaining = endtime - time() + if remaining <= 0: + # Timeout has expired, signal caller of this. + raise RuntimeError("Acquiring read lock timed out") + self.__condition.wait(remaining) + else: + self.__condition.wait() + finally: + self.__condition.release() + + def acquireWrite(self, timeout=None): + """Acquire a write lock for the current thread, waiting at most + timeout seconds or doing a non-blocking check in case timeout is <= 0. + + In case the write lock cannot be serviced due to the deadlock + condition mentioned above, a ValueError is raised. + + In case timeout is None, the call to acquireWrite blocks until the + lock request can be serviced. + + In case the timeout expires before the lock could be serviced, a + RuntimeError is thrown.""" + + if timeout is not None: + endtime = time() + timeout + me, upgradewriter = currentThread(), False + self.__condition.acquire() + try: + if self.__writer is me: + # If we are the writer, grant a new write lock, always. + self.__writercount += 1 + return + elif me in self.__readers: + # If we are a reader, no need to add us to pendingwriters, + # we get the upgradewriter slot. + if self.__upgradewritercount: + # If we are a reader and want to upgrade, and someone + # else also wants to upgrade, there is no way we can do + # this except if one of us releases all his read locks. + # Signal this to user. + raise ValueError( + "Inevitable dead lock, denying write lock" + ) + upgradewriter = True + self.__upgradewritercount = self.__readers.pop(me) + else: + # We aren't a reader, so add us to the pending writers queue + # for synchronization with the readers. + self.__pendingwriters.append(me) + while True: + if not self.__readers and self.__writer is None: + # Only test anything if there are no readers and writers. + if self.__upgradewritercount: + if upgradewriter: + # There is a writer to upgrade, and it's us. Take + # the write lock. + self.__writer = me + self.__writercount = self.__upgradewritercount + 1 + self.__upgradewritercount = 0 + return + # There is a writer to upgrade, but it's not us. + # Always leave the upgrade writer the advance slot, + # because he presumes he'll get a write lock directly + # from a previously held read lock. + elif self.__pendingwriters[0] is me: + # If there are no readers and writers, it's always + # fine for us to take the writer slot, removing us + # from the pending writers queue. + # This might mean starvation for readers, though. + self.__writer = me + self.__writercount = 1 + self.__pendingwriters = self.__pendingwriters[1:] + return + if timeout is not None: + remaining = endtime - time() + if remaining <= 0: + # Timeout has expired, signal caller of this. + if upgradewriter: + # Put us back on the reader queue. No need to + # signal anyone of this change, because no other + # writer could've taken our spot before we got + # here (because of remaining readers), as the test + # for proper conditions is at the start of the + # loop, not at the end. 
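A usage sketch for the lock API implemented in this file: acquireRead/acquireWrite nest per thread and each acquisition must be paired with release(), so try/finally is the natural pattern (store is a hypothetical shared dict):

    from ReadWriteLock import ReadWriteLock

    lock = ReadWriteLock()
    store = {}

    def read_value(key):                  # many readers may hold this at once
        lock.acquireRead()
        try:
            return store.get(key)
        finally:
            lock.release()

    def write_value(key, value):          # writers are exclusive and preferred
        lock.acquireWrite(timeout=5.0)    # raises RuntimeError on timeout
        try:
            store[key] = value
        finally:
            lock.release()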
+ self.__readers[me] = self.__upgradewritercount + self.__upgradewritercount = 0 + else: + # We were a simple pending writer, just remove us + # from the FIFO list. + self.__pendingwriters.remove(me) + raise RuntimeError("Acquiring write lock timed out") + self.__condition.wait(remaining) + else: + self.__condition.wait() + finally: + self.__condition.release() + + def release(self): + """Release the currently held lock. + + In case the current thread holds no lock, a ValueError is thrown.""" + + me = currentThread() + self.__condition.acquire() + try: + if self.__writer is me: + # We are the writer, take one nesting depth away. + self.__writercount -= 1 + if not self.__writercount: + # No more write locks; take our writer position away and + # notify waiters of the new circumstances. + self.__writer = None + self.__condition.notifyAll() + elif me in self.__readers: + # We are a reader currently, take one nesting depth away. + self.__readers[me] -= 1 + if not self.__readers[me]: + # No more read locks, take our reader position away. + del self.__readers[me] + if not self.__readers: + # No more readers, notify waiters of the new + # circumstances. + self.__condition.notifyAll() + else: + raise ValueError("Trying to release unheld lock") + finally: + self.__condition.release() diff --git a/module/lib/Unzip.py b/module/lib/Unzip.py deleted file mode 100644 index f56fbe751..000000000 --- a/module/lib/Unzip.py +++ /dev/null @@ -1,50 +0,0 @@ -import zipfile -import os - -class Unzip: - def __init__(self): - pass - - def extract(self, file, dir): - if not dir.endswith(':') and not os.path.exists(dir): - os.mkdir(dir) - - zf = zipfile.ZipFile(file) - - # create directory structure to house files - self._createstructure(file, dir) - - # extract files to directory structure - for i, name in enumerate(zf.namelist()): - - if not name.endswith('/') and not name.endswith("config"): - print "extracting", name.replace("pyload/","") - outfile = open(os.path.join(dir, name.replace("pyload/","")), 'wb') - outfile.write(zf.read(name)) - outfile.flush() - outfile.close() - - def _createstructure(self, file, dir): - self._makedirs(self._listdirs(file), dir) - - def _makedirs(self, directories, basedir): - """ Create any directories that don't currently exist """ - for dir in directories: - curdir = os.path.join(basedir, dir) - if not os.path.exists(curdir): - os.mkdir(curdir) - - def _listdirs(self, file): - """ Grabs all the directories in the zip structure - This is necessary to create the structure before trying - to extract the file to it. """ - zf = zipfile.ZipFile(file) - - dirs = [] - - for name in zf.namelist(): - if name.endswith('/'): - dirs.append(name.replace("pyload/","")) - - dirs.sort() - return dirs diff --git a/module/lib/bottle.py b/module/lib/bottle.py index 2c243278e..b00bda1c9 100644 --- a/module/lib/bottle.py +++ b/module/lib/bottle.py @@ -9,14 +9,14 @@ Python Standard Library. Homepage and documentation: http://bottlepy.org/ -Copyright (c) 2011, Marcel Hellkamp. -License: MIT (see LICENSE.txt for details) +Copyright (c) 2012, Marcel Hellkamp. 
+License: MIT (see LICENSE for details) """ from __future__ import with_statement __author__ = 'Marcel Hellkamp' -__version__ = '0.10.2' +__version__ = '0.11.4' __license__ = 'MIT' # The gevent server adapter needs to patch some modules before they are imported @@ -35,102 +35,111 @@ if __name__ == '__main__': if _cmd_options.server and _cmd_options.server.startswith('gevent'): import gevent.monkey; gevent.monkey.patch_all() -import sys -import base64 -import cgi -import email.utils -import functools -import hmac -import httplib -import imp -import itertools -import mimetypes -import os -import re -import subprocess -import tempfile -import thread -import threading -import time -import warnings - -from Cookie import SimpleCookie +import base64, cgi, email.utils, functools, hmac, imp, itertools, mimetypes,\ + os, re, subprocess, sys, tempfile, threading, time, urllib, warnings + from datetime import date as datedate, datetime, timedelta from tempfile import TemporaryFile from traceback import format_exc, print_exc -from urlparse import urljoin, SplitResult as UrlSplitResult - -# Workaround for a bug in some versions of lib2to3 (fixed on CPython 2.7 and 3.2) -import urllib -urlencode = urllib.urlencode -urlquote = urllib.quote -urlunquote = urllib.unquote - -try: from collections import MutableMapping as DictMixin -except ImportError: # pragma: no cover - from UserDict import DictMixin - -try: from urlparse import parse_qs -except ImportError: # pragma: no cover - from cgi import parse_qs - -try: import cPickle as pickle -except ImportError: # pragma: no cover - import pickle try: from json import dumps as json_dumps, loads as json_lds except ImportError: # pragma: no cover try: from simplejson import dumps as json_dumps, loads as json_lds - except ImportError: # pragma: no cover + except ImportError: try: from django.utils.simplejson import dumps as json_dumps, loads as json_lds - except ImportError: # pragma: no cover + except ImportError: def json_dumps(data): raise ImportError("JSON support requires Python 2.6 or simplejson.") json_lds = json_dumps -py3k = sys.version_info >= (3,0,0) -NCTextIOWrapper = None -if py3k: # pragma: no cover - json_loads = lambda s: json_lds(touni(s)) - # See Request.POST + +# We now try to fix 2.5/2.6/3.1/3.2 incompatibilities. +# It ain't pretty but it works... Sorry for the mess. + +py = sys.version_info +py3k = py >= (3,0,0) +py25 = py < (2,6,0) +py31 = (3,1,0) <= py < (3,2,0) + +# Workaround for the missing "as" keyword in py3k. +def _e(): return sys.exc_info()[1] + +# Workaround for the "print is a keyword/function" Python 2/3 dilemma +# and a fallback for mod_wsgi (restricts stdout/err attribute access) +try: + _stdout, _stderr = sys.stdout.write, sys.stderr.write +except IOError: + _stdout = lambda x: sys.stdout.write(x) + _stderr = lambda x: sys.stderr.write(x) + +# Lots of stdlib and builtin differences.
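A sketch of the _e() idiom defined above: it stands in for the version-specific except syntax (py2's `except E, e` versus py3's `except E as e`), so one code base runs unchanged on both interpreter lines:

    try:
        int('not a number')
    except ValueError:
        err = _e()            # sys.exc_info()[1]: the active exception object
        print(err.args[0])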
+if py3k: + import http.client as httplib + import _thread as thread + from urllib.parse import urljoin, SplitResult as UrlSplitResult + from urllib.parse import urlencode, quote as urlquote, unquote as urlunquote + urlunquote = functools.partial(urlunquote, encoding='latin1') + from http.cookies import SimpleCookie + from collections import MutableMapping as DictMixin + import pickle from io import BytesIO - def touni(x, enc='utf8', err='strict'): - """ Convert anything to unicode """ - return str(x, enc, err) if isinstance(x, bytes) else str(x) - if sys.version_info < (3,2,0): - from io import TextIOWrapper - class NCTextIOWrapper(TextIOWrapper): - ''' Garbage collecting an io.TextIOWrapper(buffer) instance closes - the wrapped buffer. This subclass keeps it open. ''' - def close(self): pass -else: - json_loads = json_lds + basestring = str + unicode = str + json_loads = lambda s: json_lds(touni(s)) + callable = lambda x: hasattr(x, '__call__') + imap = map +else: # 2.x + import httplib + import thread + from urlparse import urljoin, SplitResult as UrlSplitResult + from urllib import urlencode, quote as urlquote, unquote as urlunquote + from Cookie import SimpleCookie + from itertools import imap + import cPickle as pickle from StringIO import StringIO as BytesIO - bytes = str - def touni(x, enc='utf8', err='strict'): - """ Convert anything to unicode """ - return x if isinstance(x, unicode) else unicode(str(x), enc, err) - -def tob(data, enc='utf8'): - """ Convert anything to bytes """ - return data.encode(enc) if isinstance(data, unicode) else bytes(data) + if py25: + from UserDict import DictMixin + def next(it): return it.next() + bytes = str + else: # 2.6, 2.7 + from collections import MutableMapping as DictMixin + json_loads = json_lds +# Some helpers for string/byte handling +def tob(s, enc='utf8'): + return s.encode(enc) if isinstance(s, unicode) else bytes(s) +def touni(s, enc='utf8', err='strict'): + return s.decode(enc, err) if isinstance(s, bytes) else unicode(s) tonat = touni if py3k else tob -tonat.__doc__ = """ Convert anything to native strings """ -def try_update_wrapper(wrapper, wrapped, *a, **ka): - try: # Bug: functools breaks if wrapper is an instance method - functools.update_wrapper(wrapper, wrapped, *a, **ka) +# 3.2 fixes cgi.FieldStorage to accept bytes (which makes a lot of sense). +# 3.1 needs a workaround. +if py31: + from io import TextIOWrapper + class NCTextIOWrapper(TextIOWrapper): + def close(self): pass # Keep wrapped buffer open. + +# File uploads (which are implemented as empty FieldStorage instances...) +# have a negative truth value. That makes no sense, here is a fix. +class FieldStorage(cgi.FieldStorage): + def __nonzero__(self): return bool(self.list or self.file) + if py3k: __bool__ = __nonzero__ + +# A bug in functools causes it to break if the wrapper is an instance method +def update_wrapper(wrapper, wrapped, *a, **ka): + try: functools.update_wrapper(wrapper, wrapped, *a, **ka) except AttributeError: pass -# Backward compatibility + + +# These helpers are used at module level and need to be defined first. +# And yes, I know PEP-8, but sometimes a lower-case classname makes more sense.
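A quick sketch of the tob/touni/tonat helpers added above; on both interpreter lines they round-trip between byte strings and text strings (utf8 by default):

    s = touni(tob(u'caf\xe9'))       # text -> bytes -> text
    assert s == u'caf\xe9'
    assert tob('x') == tob(u'x')     # native str and unicode encode alike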
def depr(message): warnings.warn(message, DeprecationWarning, stacklevel=3) - -# Small helpers -def makelist(data): +def makelist(data): # This is just too handy if isinstance(data, (tuple, list, set, dict)): return list(data) elif data: return [data] else: return [] @@ -161,7 +170,7 @@ class DictProperty(object): del getattr(obj, self.attr)[self.key] -class CachedProperty(object): +class cached_property(object): ''' A property that is only computed once per instance and then replaces itself with an ordinary attribute. Deleting the attribute resets the property. ''' @@ -174,10 +183,8 @@ class CachedProperty(object): value = obj.__dict__[self.func.__name__] = self.func(obj) return value -cached_property = CachedProperty - -class lazy_attribute(object): # Does not need configuration -> lower-case name +class lazy_attribute(object): ''' A property that caches itself to the class object. ''' def __init__(self, func): functools.update_wrapper(self, func, updated=[]) @@ -203,35 +210,6 @@ class BottleException(Exception): pass -#TODO: These should subclass BaseRequest - -class HTTPResponse(BottleException): - """ Used to break execution and immediately finish the response """ - def __init__(self, output='', status=200, header=None): - super(BottleException, self).__init__("HTTP Response %d" % status) - self.status = int(status) - self.output = output - self.headers = HeaderDict(header) if header else None - - def apply(self, response): - if self.headers: - for key, value in self.headers.iterallitems(): - response.headers[key] = value - response.status = self.status - - -class HTTPError(HTTPResponse): - """ Used to generate an error page """ - def __init__(self, code=500, output='Unknown Error', exception=None, - traceback=None, header=None): - super(HTTPError, self).__init__(output, code, header) - self.exception = exception - self.traceback = traceback - - def __repr__(self): - return template(ERROR_PAGE_TEMPLATE, e=self) - - @@ -251,12 +229,15 @@ class RouteReset(BottleException): class RouterUnknownModeError(RouteError): pass + class RouteSyntaxError(RouteError): """ The route parser found something not supported by this router """ + class RouteBuildError(RouteError): """ The route could not be built """ + class Router(object): ''' A Router is an ordered collection of route->target pairs. It is used to efficiently match WSGI requests against a number of routes and return @@ -285,7 +266,7 @@ class Router(object): #: If true, static routes are no longer checked first. self.strict_order = strict self.filters = {'re': self.re_filter, 'int': self.int_filter, - 'float': self.re_filter, 'path': self.path_filter} + 'float': self.float_filter, 'path': self.path_filter} def re_filter(self, conf): return conf or self.default_pattern, None, None @@ -294,17 +275,17 @@ class Router(object): return r'-?\d+', int, lambda x: str(int(x)) def float_filter(self, conf): - return r'-?\d*\.\d+', float, lambda x: str(float(x)) + return r'-?[\d.]+', float, lambda x: str(float(x)) def path_filter(self, conf): - return r'.*?', None, None - + return r'.+?', None, None + def add_filter(self, name, func): ''' Add a filter. The provided function is called with the configuration string as parameter and must return a (regexp, to_python, to_url) tuple. The first element is a string, the last two are callables or None. ''' self.filters[name] = func - + def parse_rule(self, rule): ''' Parses a rule into a (name, filter, conf) token stream. If mode is None, name contains a static rule part.
''' @@ -366,8 +347,8 @@ class Router(object): try: re_match = re.compile('^(%s)$' % pattern).match - except re.error, e: - raise RouteSyntaxError("Could not add Route: %s (%s)" % (rule, e)) + except re.error: + raise RouteSyntaxError("Could not add Route: %s (%s)" % (rule, _e())) def match(path): """ Return an url-argument dictionary. """ @@ -383,7 +364,7 @@ class Router(object): combined = '%s|(^%s$)' % (self.dynamic[-1][0].pattern, flat_pattern) self.dynamic[-1] = (re.compile(combined), self.dynamic[-1][1]) self.dynamic[-1][1].append((match, target)) - except (AssertionError, IndexError), e: # AssertionError: Too many groups + except (AssertionError, IndexError): # AssertionError: Too many groups self.dynamic.append((re.compile('(^%s$)' % flat_pattern), [(match, target)])) return match @@ -396,8 +377,8 @@ class Router(object): for i, value in enumerate(anons): query['anon%d'%i] = value url = ''.join([f(query.pop(n)) if n else f for (n,f) in builder]) return url if not query else url+'?'+urlencode(query) - except KeyError, e: - raise RouteBuildError('Missing URL argument: %r' % e.args[0]) + except KeyError: + raise RouteBuildError('Missing URL argument: %r' % _e().args[0]) def match(self, environ): ''' Return a (target, url_args) tuple or raise HTTPError(400/404/405). ''' @@ -424,9 +405,7 @@ class Router(object): allowed = [verb for verb in targets if verb != 'ANY'] if 'GET' in allowed and 'HEAD' not in allowed: allowed.append('HEAD') - raise HTTPError(405, "Method not allowed.", - header=[('Allow',",".join(allowed))]) - + raise HTTPError(405, "Method not allowed.", Allow=",".join(allowed)) class Route(object): @@ -435,7 +414,6 @@ class Route(object): turning an URL path rule into a regular expression usable by the Router. ''' - def __init__(self, app, rule, method, callback, name=None, plugins=None, skiplist=None, **config): #: The application this route is installed to. @@ -509,9 +487,12 @@ class Route(object): except RouteReset: # Try again with changed configuration. return self._make_callback() if not callback is self.callback: - try_update_wrapper(callback, self.callback) + update_wrapper(callback, self.callback) return callback + def __repr__(self): + return '<%s %r %r>' % (self.method, self.rule, self.callback) + @@ -523,27 +504,38 @@ class Route(object): class Bottle(object): - """ WSGI application """ + """ Each Bottle object represents a single, distinct web application and + consists of routes, callbacks, plugins, resources and configuration. + Instances are callable WSGI applications. + + :param catchall: If true (default), handle all exceptions. Turn off to + let debugging middleware handle exceptions. + """ + + def __init__(self, catchall=True, autojson=True): + #: If true, most exceptions are caught and returned as :exc:`HTTPError` + self.catchall = catchall + + #: A :class:`ResourceManager` for application files + self.resources = ResourceManager() + + #: A :class:`ConfigDict` for app specific configuration. + self.config = ConfigDict() + self.config.autojson = autojson - def __init__(self, catchall=True, autojson=True, config=None): - """ Create a new bottle instance. - You usually don't do that. Use `bottle.app.push()` instead. - """ self.routes = [] # List of installed :class:`Route` instances. self.router = Router() # Maps requests to :class:`Route` instances. - self.plugins = [] # List of installed plugins.
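The router's filter table set up above (re/int/float/path, with 'float' now correctly mapped to float_filter instead of re_filter) drives dynamic route syntax; a sketch of the three typed filters in use, with app a hypothetical Bottle instance:

    app = Bottle()

    @app.route('/article/<aid:int>')        # int filter: matches -?\d+
    def article(aid):
        return 'article #%d' % aid

    @app.route('/price/<value:float>')      # float filter
    def price(value):
        return '%.2f' % value

    @app.route('/files/<fpath:path>')       # path filter may contain slashes
    def files(fpath):
        return fpath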
- self.error_handler = {} - #: If true, most exceptions are catched and returned as :exc:`HTTPError` - self.config = ConfigDict(config or {}) - self.catchall = catchall - #: An instance of :class:`HooksPlugin`. Empty by default. + + # Core plugins + self.plugins = [] # List of installed plugins. self.hooks = HooksPlugin() self.install(self.hooks) - if autojson: + if self.config.autojson: self.install(JSONPlugin()) self.install(TemplatePlugin()) + def mount(self, prefix, app, **options): ''' Mount an application (:class:`Bottle` or plain WSGI) to a specific URL prefix. Example:: @@ -560,14 +552,11 @@ class Bottle(object): prefix, app = app, prefix depr('Parameter order of Bottle.mount() changed.') # 0.10 - parts = filter(None, prefix.split('/')) - if not parts: raise ValueError('Empty path prefix.') - path_depth = len(parts) - options.setdefault('skip', True) - options.setdefault('method', 'ANY') + segments = [p for p in prefix.split('/') if p] + if not segments: raise ValueError('Empty path prefix.') + path_depth = len(segments) - @self.route('/%s/:#.*#' % '/'.join(parts), **options) - def mountpoint(): + def mountpoint_wrapper(): try: request.path_shift(path_depth) rs = BaseResponse([], 200) @@ -575,13 +564,30 @@ class Bottle(object): rs.status = status for name, value in header: rs.add_header(name, value) return rs.body.append - rs.body = itertools.chain(rs.body, app(request.environ, start_response)) - return HTTPResponse(rs.body, rs.status, rs.headers) + body = app(request.environ, start_response) + body = itertools.chain(rs.body, body) + return HTTPResponse(body, rs.status_code, **rs.headers) finally: request.path_shift(-path_depth) + options.setdefault('skip', True) + options.setdefault('method', 'ANY') + options.setdefault('mountpoint', {'prefix': prefix, 'target': app}) + options['callback'] = mountpoint_wrapper + + self.route('/%s/<:re:.*>' % '/'.join(segments), **options) if not prefix.endswith('/'): - self.route('/' + '/'.join(parts), callback=mountpoint, **options) + self.route('/' + '/'.join(segments), **options) + + def merge(self, routes): + ''' Merge the routes of another :class:`Bottle` application or a list of + :class:`Route` objects into this application. The routes keep their + 'owner', meaning that the :data:`Route.app` attribute is not + changed. ''' + if isinstance(routes, Bottle): + routes = routes.routes + for route in routes: + self.add_route(route) def install(self, plugin): ''' Add a plugin to the list of plugins and prepare it for being @@ -610,6 +616,10 @@ class Bottle(object): if removed: self.reset() return removed + def run(self, **kwargs): + ''' Calls :func:`run` with the same parameters. ''' + run(self, **kwargs) + def reset(self, route=None): ''' Reset all routes (force plugins to be re-applied) and clear all caches. If an ID or route object is given, only that specific route @@ -640,6 +650,13 @@ class Bottle(object): location = self.router.build(routename, **kargs).lstrip('/') return urljoin(urljoin('/', scriptname), location) + def add_route(self, route): + ''' Add a route object, but do not change the :data:`Route.app` + attribute.''' + self.routes.append(route) + self.router.add(route.rule, route.method, route, name=route.name) + if DEBUG: route.prepare() + def route(self, path=None, method='GET', callback=None, name=None, apply=None, skip=None, **config): """ A decorator to bind a function to a request URL. 
Example:: @@ -678,9 +695,7 @@ class Bottle(object): verb = verb.upper() route = Route(self, rule, verb, callback, name=name, plugins=plugins, skiplist=skiplist, **config) - self.routes.append(route) - self.router.add(rule, verb, route, name=name) - if DEBUG: route.prepare() + self.add_route(route) return callback return decorator(callback) if callback else decorator @@ -708,7 +723,13 @@ class Bottle(object): return wrapper def hook(self, name): - """ Return a decorator that attaches a callback to a hook. """ + """ Return a decorator that attaches a callback to a hook. Three hooks + are currently implemented: + + - before_request: Executed once before each request + - after_request: Executed once after each request + - app_reset: Called whenever :meth:`reset` is called. + """ def wrapper(func): self.hooks.add(name, func) return func @@ -716,8 +737,8 @@ class Bottle(object): def handle(self, path, method='GET'): """ (deprecated) Execute the first matching route callback and return - the result. :exc:`HTTPResponse` exceptions are catched and returned. - If :attr:`Bottle.catchall` is true, other exceptions are catched as + the result. :exc:`HTTPResponse` exceptions are caught and returned. + If :attr:`Bottle.catchall` is true, other exceptions are caught as well and returned as :exc:`HTTPError` instances (500). """ depr("This method will change semantics in 0.10. Try to avoid it.") @@ -725,26 +746,33 @@ class Bottle(object): return self._handle(path) return self._handle({'PATH_INFO': path, 'REQUEST_METHOD': method.upper()}) + def default_error_handler(self, res): + return tob(template(ERROR_PAGE_TEMPLATE, e=res)) + def _handle(self, environ): try: + environ['bottle.app'] = self + request.bind(environ) + response.bind() route, args = self.router.match(environ) - environ['route.handle'] = environ['bottle.route'] = route + environ['route.handle'] = route + environ['bottle.route'] = route environ['route.url_args'] = args return route.call(**args) - except HTTPResponse, r: - return r + except HTTPResponse: + return _e() except RouteReset: route.reset() return self._handle(environ) except (KeyboardInterrupt, SystemExit, MemoryError): raise - except Exception, e: + except Exception: if not self.catchall: raise - stacktrace = format_exc(10) + stacktrace = format_exc() environ['wsgi.errors'].write(stacktrace) - return HTTPError(500, "Internal Server Error", e, stacktrace) + return HTTPError(500, "Internal Server Error", _e(), stacktrace) - def _cast(self, out, request, response, peek=None): + def _cast(self, out, peek=None): """ Try to convert the parameter into something WSGI compatible and set correct HTTP headers when possible. Support: False, str, unicode, dict, HTTPResponse, HTTPError, file-like, @@ -753,7 +781,8 @@ class Bottle(object): # Empty output is done here if not out: - response['Content-Length'] = 0 + if 'Content-Length' not in response: + response['Content-Length'] = 0 return [] # Join lists of byte or unicode strings. Mixed lists are NOT supported if isinstance(out, (tuple, list))\ @@ -764,19 +793,18 @@ class Bottle(object): out = out.encode(response.charset) # Byte Strings are just returned if isinstance(out, bytes): - response['Content-Length'] = len(out) + if 'Content-Length' not in response: + response['Content-Length'] = len(out) return [out] # HTTPError or HTTPException (recursive, because they may wrap anything) # TODO: Handle these explicitly in handle() or make them iterable. 
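A usage sketch for the hook() decorator documented above (app again a hypothetical Bottle instance; request and response are the module-level thread locals):

    @app.hook('before_request')
    def mark_start():
        request.environ['hypothetical.start'] = time.time()

    @app.hook('after_request')
    def add_header():
        response.set_header('X-Hypothetical', 'yes')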
if isinstance(out, HTTPError): out.apply(response) - out = self.error_handler.get(out.status, repr)(out) - if isinstance(out, HTTPResponse): - depr('Error handlers must not return :exc:`HTTPResponse`.') #0.9 - return self._cast(out, request, response) + out = self.error_handler.get(out.status_code, self.default_error_handler)(out) + return self._cast(out) if isinstance(out, HTTPResponse): out.apply(response) - return self._cast(out.output, request, response) + return self._cast(out.body) # File-like objects. if hasattr(out, 'read'): @@ -788,54 +816,54 @@ class Bottle(object): # Handle Iterables. We peek into them to detect their inner type. try: out = iter(out) - first = out.next() + first = next(out) while not first: - first = out.next() + first = next(out) except StopIteration: - return self._cast('', request, response) - except HTTPResponse, e: - first = e - except Exception, e: - first = HTTPError(500, 'Unhandled exception', e, format_exc(10)) - if isinstance(e, (KeyboardInterrupt, SystemExit, MemoryError))\ - or not self.catchall: - raise + return self._cast('') + except HTTPResponse: + first = _e() + except (KeyboardInterrupt, SystemExit, MemoryError): + raise + except Exception: + if not self.catchall: raise + first = HTTPError(500, 'Unhandled exception', _e(), format_exc()) + # These are the inner types allowed in iterator or generator objects. if isinstance(first, HTTPResponse): - return self._cast(first, request, response) + return self._cast(first) if isinstance(first, bytes): return itertools.chain([first], out) if isinstance(first, unicode): - return itertools.imap(lambda x: x.encode(response.charset), + return imap(lambda x: x.encode(response.charset), itertools.chain([first], out)) return self._cast(HTTPError(500, 'Unsupported response type: %s'\ - % type(first)), request, response) + % type(first))) def wsgi(self, environ, start_response): """ The bottle WSGI-interface. 
""" try: - environ['bottle.app'] = self - request.bind(environ) - response.bind() - out = self._cast(self._handle(environ), request, response) + out = self._cast(self._handle(environ)) # rfc2616 section 4.3 if response._status_code in (100, 101, 204, 304)\ - or request.method == 'HEAD': + or environ['REQUEST_METHOD'] == 'HEAD': if hasattr(out, 'close'): out.close() out = [] - start_response(response._status_line, list(response.iter_headers())) + start_response(response._status_line, response.headerlist) return out except (KeyboardInterrupt, SystemExit, MemoryError): raise - except Exception, e: + except Exception: if not self.catchall: raise err = '<h1>Critical error while processing request: %s</h1>' \ - % environ.get('PATH_INFO', '/') + % html_escape(environ.get('PATH_INFO', '/')) if DEBUG: - err += '<h2>Error:</h2>\n<pre>%s</pre>\n' % repr(e) - err += '<h2>Traceback:</h2>\n<pre>%s</pre>\n' % format_exc(10) - environ['wsgi.errors'].write(err) #TODO: wsgi.error should not get html - start_response('500 INTERNAL SERVER ERROR', [('Content-Type', 'text/html')]) + err += '<h2>Error:</h2>\n<pre>\n%s\n</pre>\n' \ + '<h2>Traceback:</h2>\n<pre>\n%s\n</pre>\n' \ + % (html_escape(repr(_e())), html_escape(format_exc())) + environ['wsgi.errors'].write(err) + headers = [('Content-Type', 'text/html; charset=UTF-8')] + start_response('500 INTERNAL SERVER ERROR', headers) return [tob(err)] def __call__(self, environ, start_response): @@ -852,19 +880,33 @@ class Bottle(object): ############################################################################### -class BaseRequest(DictMixin): +class BaseRequest(object): """ A wrapper for WSGI environment dictionaries that adds a lot of - convenient access methods and properties. Most of them are read-only.""" + convenient access methods and properties. Most of them are read-only. + + Adding new attributes to a request actually adds them to the environ + dictionary (as 'bottle.request.ext.<name>'). This is the recommended + way to store and access request-specific data. + """ + + __slots__ = ('environ') #: Maximum size of memory buffer for :attr:`body` in bytes. MEMFILE_MAX = 102400 + #: Maximum number pr GET or POST parameters per request + MAX_PARAMS = 100 - def __init__(self, environ): + def __init__(self, environ=None): """ Wrap a WSGI environ dictionary. """ #: The wrapped WSGI environ dictionary. This is the only real attribute. #: All other attributes actually are read-only properties. - self.environ = environ - environ['bottle.request'] = self + self.environ = {} if environ is None else environ + self.environ['bottle.request'] = self + + @DictProperty('environ', 'bottle.app', read_only=True) + def app(self): + ''' Bottle application handling this request. ''' + raise RuntimeError('This request is not connected to an application.') @property def path(self): @@ -892,7 +934,8 @@ class BaseRequest(DictMixin): """ Cookies parsed into a :class:`FormsDict`. Signed cookies are NOT decoded. Use :meth:`get_cookie` if you expect signed cookies. """ cookies = SimpleCookie(self.environ.get('HTTP_COOKIE','')) - return FormsDict((c.key, c.value) for c in cookies.itervalues()) + cookies = list(cookies.values())[:self.MAX_PARAMS] + return FormsDict((c.key, c.value) for c in cookies) def get_cookie(self, key, default=None, secret=None): """ Return the content of a cookie. 
To read a `Signed Cookie`, the @@ -911,11 +954,10 @@ class BaseRequest(DictMixin): values are sometimes called "URL arguments" or "GET parameters", but not to be confused with "URL wildcards" as they are provided by the :class:`Router`. ''' - data = parse_qs(self.query_string, keep_blank_values=True) get = self.environ['bottle.get'] = FormsDict() - for key, values in data.iteritems(): - for value in values: - get[key] = value + pairs = _parse_qsl(self.environ.get('QUERY_STRING', '')) + for key, value in pairs[:self.MAX_PARAMS]: + get[key] = value return get @DictProperty('environ', 'bottle.request.forms', read_only=True) @@ -925,7 +967,7 @@ class BaseRequest(DictMixin): :class:`FormsDict`. All keys and values are strings. File uploads are stored separately in :attr:`files`. """ forms = FormsDict() - for name, item in self.POST.iterallitems(): + for name, item in self.POST.allitems(): if not hasattr(item, 'filename'): forms[name] = item return forms @@ -935,9 +977,9 @@ class BaseRequest(DictMixin): """ A :class:`FormsDict` with the combined values of :attr:`query` and :attr:`forms`. File uploads are stored in :attr:`files`. """ params = FormsDict() - for key, value in self.query.iterallitems(): + for key, value in self.query.allitems(): params[key] = value - for key, value in self.forms.iterallitems(): + for key, value in self.forms.allitems(): params[key] = value return params @@ -959,7 +1001,7 @@ class BaseRequest(DictMixin): on big files. """ files = FormsDict() - for name, item in self.POST.iterallitems(): + for name, item in self.POST.allitems(): if hasattr(item, 'filename'): files[name] = item return files @@ -1009,15 +1051,26 @@ class BaseRequest(DictMixin): instances of :class:`cgi.FieldStorage` (file uploads). """ post = FormsDict() + # We default to application/x-www-form-urlencoded for everything that + # is not multipart and take the fast path (also: 3.1 workaround) + if not self.content_type.startswith('multipart/'): + maxlen = max(0, min(self.content_length, self.MEMFILE_MAX)) + pairs = _parse_qsl(tonat(self.body.read(maxlen), 'latin1')) + for key, value in pairs[:self.MAX_PARAMS]: + post[key] = value + return post + safe_env = {'QUERY_STRING':''} # Build a safe environment for cgi for key in ('REQUEST_METHOD', 'CONTENT_TYPE', 'CONTENT_LENGTH'): if key in self.environ: safe_env[key] = self.environ[key] - if NCTextIOWrapper: - fb = NCTextIOWrapper(self.body, encoding='ISO-8859-1', newline='\n') - else: - fb = self.body - data = cgi.FieldStorage(fp=fb, environ=safe_env, keep_blank_values=True) - for item in data.list or []: + args = dict(fp=self.body, environ=safe_env, keep_blank_values=True) + if py31: + args['fp'] = NCTextIOWrapper(args['fp'], encoding='ISO-8859-1', + newline='\n') + elif py3k: + args['encoding'] = 'ISO-8859-1' + data = FieldStorage(**args) + for item in (data.list or [])[:self.MAX_PARAMS]: post[item.name] = item if item.filename else item.value return post @@ -1042,7 +1095,7 @@ class BaseRequest(DictMixin): but the fragment is always empty because it is not visible to the server. ''' env = self.environ - http = env.get('wsgi.url_scheme', 'http') + http = env.get('HTTP_X_FORWARDED_PROTO') or env.get('wsgi.url_scheme', 'http') host = env.get('HTTP_X_FORWARDED_HOST') or env.get('HTTP_HOST') if not host: # HTTP 1.1 requires a Host-header. This is for HTTP/1.0 clients. 
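A handler sketch using the query/forms/files properties defined above (route path and parameter names hypothetical):

    @app.route('/upload', method='POST')
    def upload():
        tag = request.query.tag               # FormsDict attribute access
        name = request.forms.get('name', '')
        item = request.files.get('data')      # cgi.FieldStorage for uploads
        return 'tag=%s name=%s file=%s' % (
            tag, name, item.filename if item else '-')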
@@ -1091,6 +1144,11 @@ class BaseRequest(DictMixin): return int(self.environ.get('CONTENT_LENGTH') or -1) @property + def content_type(self): + ''' The Content-Type header as a lowercase-string (default: empty). ''' + return self.environ.get('CONTENT_TYPE', '').lower() + + @property def is_xhr(self): ''' True if the request was triggered by a XMLHttpRequest. This only works with JavaScript libraries that support the `X-Requested-With` @@ -1139,6 +1197,7 @@ class BaseRequest(DictMixin): """ Return a new :class:`Request` with a shallow :attr:`environ` copy. """ return Request(self.environ.copy()) + def get(self, value, default=None): return self.environ.get(value, default) def __getitem__(self, key): return self.environ[key] def __delitem__(self, key): self[key] = ""; del(self.environ[key]) def __iter__(self): return iter(self.environ) @@ -1166,27 +1225,41 @@ class BaseRequest(DictMixin): def __repr__(self): return '<%s: %s %s>' % (self.__class__.__name__, self.method, self.url) + def __getattr__(self, name): + ''' Search in self.environ for additional user defined attributes. ''' + try: + var = self.environ['bottle.request.ext.%s'%name] + return var.__get__(self) if hasattr(var, '__get__') else var + except KeyError: + raise AttributeError('Attribute %r not defined.' % name) + + def __setattr__(self, name, value): + if name == 'environ': return object.__setattr__(self, name, value) + self.environ['bottle.request.ext.%s'%name] = value + + + + def _hkey(s): return s.title().replace('_','-') class HeaderProperty(object): def __init__(self, name, reader=None, writer=str, default=''): - self.name, self.reader, self.writer, self.default = name, reader, writer, default + self.name, self.default = name, default + self.reader, self.writer = reader, writer self.__doc__ = 'Current value of the %r header.' % name.title() def __get__(self, obj, cls): if obj is None: return self - value = obj.headers.get(self.name) - return self.reader(value) if (value and self.reader) else (value or self.default) + value = obj.headers.get(self.name, self.default) + return self.reader(value) if self.reader else value def __set__(self, obj, value): - if self.writer: value = self.writer(value) - obj.headers[self.name] = value + obj.headers[self.name] = self.writer(value) def __delete__(self, obj): - if self.name in obj.headers: - del obj.headers[self.name] + del obj.headers[self.name] class BaseResponse(object): @@ -1209,11 +1282,9 @@ class BaseResponse(object): 'Content-Md5', 'Last-Modified'))} def __init__(self, body='', status=None, **headers): - self._status_line = None - self._status_code = None - self.body = body self._cookies = None self._headers = {'Content-Type': [self.default_content_type]} + self.body = body self.status = status or self.default_status if headers: for name, value in headers.items(): @@ -1253,26 +1324,24 @@ class BaseResponse(object): raise ValueError('String status line without a reason phrase.') if not 100 <= code <= 999: raise ValueError('Status code out of range.') self._status_code = code - self._status_line = status or ('%d Unknown' % code) + self._status_line = str(status or ('%d Unknown' % code)) def _get_status(self): - depr('BaseReuqest.status will change to return a string in 0.11. Use'\ - ' status_line and status_code to make sure.') #0.10 - return self._status_code + return self._status_line status = property(_get_status, _set_status, None, ''' A writeable property to change the HTTP response status. 
It accepts either a numeric code (100-999) or a string with a custom reason phrase (e.g. "404 Brain not found"). Both :data:`status_line` and - :data:`status_code` are updates accordingly. The return value is - always a numeric code. ''') + :data:`status_code` are updated accordingly. The return value is + always a status string. ''') del _get_status, _set_status @property def headers(self): ''' An instance of :class:`HeaderDict`, a case-insensitive dict-like view on the response headers. ''' - self.__dict__['headers'] = hdict = HeaderDict() + hdict = HeaderDict() hdict.dict = self._headers return hdict @@ -1286,13 +1355,10 @@ class BaseResponse(object): header with that name, return a default value. ''' return self._headers.get(_hkey(name), [default])[-1] - def set_header(self, name, value, append=False): + def set_header(self, name, value): ''' Create a new response header, replacing any previously defined headers with the same name. ''' - if append: - self.add_header(name, value) - else: - self._headers[_hkey(name)] = [str(value)] + self._headers[_hkey(name)] = [str(value)] def add_header(self, name, value): ''' Add an additional response header, not removing duplicates. ''' @@ -1301,16 +1367,7 @@ class BaseResponse(object): def iter_headers(self): ''' Yield (header, value) tuples, skipping headers that are not allowed with the current response status code. ''' - headers = self._headers.iteritems() - bad_headers = self.bad_headers.get(self.status_code) - if bad_headers: - headers = [h for h in headers if h[0] not in bad_headers] - for name, values in headers: - for value in values: - yield name, value - if self._cookies: - for c in self._cookies.values(): - yield 'Set-Cookie', c.OutputString() + return self.headerlist def wsgiheader(self): depr('The wsgiheader method is deprecated. See headerlist.') #0.10 @@ -1319,7 +1376,16 @@ class BaseResponse(object): @property def headerlist(self): ''' WSGI conform list of (header, value) tuples. ''' - return list(self.iter_headers()) + out = [] + headers = self._headers.items() + if self._status_code in self.bad_headers: + bad_headers = self.bad_headers[self._status_code] + headers = [h for h in headers if h[0] not in bad_headers] + out += [(name, val) for name, vals in headers for val in vals] + if self._cookies: + for c in self._cookies.values(): + out.append(('Set-Cookie', c.OutputString())) + return out content_type = HeaderProperty('Content-Type') content_length = HeaderProperty('Content-Length', reader=int) @@ -1384,7 +1450,7 @@ class BaseResponse(object): if len(value) > 4096: raise ValueError('Cookie value to long.') self._cookies[name] = value - for key, value in options.iteritems(): + for key, value in options.items(): if key == 'max_age': if isinstance(value, timedelta): value = value.seconds + value.days * 24 * 3600 @@ -1409,20 +1475,76 @@ class BaseResponse(object): out += '%s: %s\n' % (name.title(), value.strip()) return out +#: Thread-local storage for :class:`LocalRequest` and :class:`LocalResponse` +#: attributes. +_lctx = threading.local() -class LocalRequest(BaseRequest, threading.local): - ''' A thread-local subclass of :class:`BaseRequest`. 
''' - def __init__(self): pass +def local_property(name): + def fget(self): + try: + return getattr(_lctx, name) + except AttributeError: + raise RuntimeError("Request context not initialized.") + def fset(self, value): setattr(_lctx, name, value) + def fdel(self): delattr(_lctx, name) + return property(fget, fset, fdel, + 'Thread-local property stored in :data:`_lctx.%s`' % name) + + +class LocalRequest(BaseRequest): + ''' A thread-local subclass of :class:`BaseRequest` with a different + set of attributes for each thread. There is usually only one global + instance of this class (:data:`request`). If accessed during a + request/response cycle, this instance always refers to the *current* + request (even on a multithreaded server). ''' bind = BaseRequest.__init__ + environ = local_property('request_environ') -class LocalResponse(BaseResponse, threading.local): - ''' A thread-local subclass of :class:`BaseResponse`. ''' +class LocalResponse(BaseResponse): + ''' A thread-local subclass of :class:`BaseResponse` with a different + set of attributes for each thread. There is usually only one global + instance of this class (:data:`response`). Its attributes are used + to build the HTTP response at the end of the request/response cycle. + ''' bind = BaseResponse.__init__ + _status_line = local_property('response_status_line') + _status_code = local_property('response_status_code') + _cookies = local_property('response_cookies') + _headers = local_property('response_headers') + body = local_property('response_body') + +Request = BaseRequest +Response = BaseResponse + +class HTTPResponse(Response, BottleException): + def __init__(self, body='', status=None, header=None, **headers): + if header or 'output' in headers: + depr('Call signature changed (for the better)') + if header: headers.update(header) + if 'output' in headers: body = headers.pop('output') + super(HTTPResponse, self).__init__(body, status, **headers) + + def apply(self, response): + response._status_code = self._status_code + response._status_line = self._status_line + response._headers = self._headers + response._cookies = self._cookies + response.body = self.body -Response = LocalResponse # BC 0.9 -Request = LocalRequest # BC 0.9 + def _output(self, value=None): + depr('Use HTTPResponse.body instead of HTTPResponse.output') + if value is None: return self.body + self.body = value + output = property(_output, _output, doc='Alias for .body') + +class HTTPError(HTTPResponse): + default_status = 500 + def __init__(self, status=None, body=None, exception=None, traceback=None, header=None, **headers): + self.exception = exception + self.traceback = traceback + super(HTTPError, self).__init__(body, status, header, **headers) @@ -1441,7 +1563,7 @@ class JSONPlugin(object): def __init__(self, json_dumps=json_dumps): self.json_dumps = json_dumps - def apply(self, callback, context): + def apply(self, callback, route): dumps = self.json_dumps if not dumps: return callback def wrapper(*a, **ka): @@ -1491,7 +1613,7 @@ class HooksPlugin(object): if ka.pop('reversed', False): hooks = hooks[::-1] return [hook(*a, **ka) for hook in hooks] - def apply(self, callback, context): + def apply(self, callback, route): if self._empty(): return callback def wrapper(*a, **ka): self.trigger('before_request') @@ -1566,26 +1688,37 @@ class MultiDict(DictMixin): """ def __init__(self, *a, **k): - self.dict = dict((k, [v]) for k, v in dict(*a, **k).iteritems()) + self.dict = dict((k, [v]) for (k, v) in dict(*a, **k).items()) + def __len__(self): return
len(self.dict) def __iter__(self): return iter(self.dict) def __contains__(self, key): return key in self.dict def __delitem__(self, key): del self.dict[key] def __getitem__(self, key): return self.dict[key][-1] def __setitem__(self, key, value): self.append(key, value) - def iterkeys(self): return self.dict.iterkeys() - def itervalues(self): return (v[-1] for v in self.dict.itervalues()) - def iteritems(self): return ((k, v[-1]) for (k, v) in self.dict.iteritems()) - def iterallitems(self): - for key, values in self.dict.iteritems(): - for value in values: - yield key, value - - # 2to3 is not able to fix these automatically. - keys = iterkeys if py3k else lambda self: list(self.iterkeys()) - values = itervalues if py3k else lambda self: list(self.itervalues()) - items = iteritems if py3k else lambda self: list(self.iteritems()) - allitems = iterallitems if py3k else lambda self: list(self.iterallitems()) + def keys(self): return self.dict.keys() + + if py3k: + def values(self): return (v[-1] for v in self.dict.values()) + def items(self): return ((k, v[-1]) for k, v in self.dict.items()) + def allitems(self): + return ((k, v) for k, vl in self.dict.items() for v in vl) + iterkeys = keys + itervalues = values + iteritems = items + iterallitems = allitems + + else: + def values(self): return [v[-1] for v in self.dict.values()] + def items(self): return [(k, v[-1]) for k, v in self.dict.items()] + def iterkeys(self): return self.dict.iterkeys() + def itervalues(self): return (v[-1] for v in self.dict.itervalues()) + def iteritems(self): + return ((k, v[-1]) for k, v in self.dict.iteritems()) + def iterallitems(self): + return ((k, v) for k, vl in self.dict.iteritems() for v in vl) + def allitems(self): + return [(k, v) for k, vl in self.dict.iteritems() for v in vl] def get(self, key, default=None, index=-1, type=None): ''' Return the most recent value for a key. @@ -1600,7 +1733,7 @@ class MultiDict(DictMixin): try: val = self.dict[key][index] return type(val) if type else val - except Exception, e: + except Exception: pass return default @@ -1626,25 +1759,45 @@ class FormsDict(MultiDict): ''' This :class:`MultiDict` subclass is used to store request form data. Additionally to the normal dict-like item access methods (which return unmodified data as native strings), this container also supports - attribute-like access to its values. Attribues are automatiically de- or - recoded to match :attr:`input_encoding` (default: 'utf8'). Missing + attribute-like access to its values. Attributes are automatically de- + or recoded to match :attr:`input_encoding` (default: 'utf8'). Missing attributes default to an empty string. ''' #: Encoding used for attribute values. input_encoding = 'utf8' + #: If true (default), unicode strings are first encoded with `latin1` + #: and then decoded to match :attr:`input_encoding`. + recode_unicode = True + + def _fix(self, s, encoding=None): + if isinstance(s, unicode) and self.recode_unicode: # Python 3 WSGI + s = s.encode('latin1') + if isinstance(s, bytes): # Python 2 WSGI + return s.decode(encoding or self.input_encoding) + return s + + def decode(self, encoding=None): + ''' Returns a copy with all keys and values de- or recoded to match + :attr:`input_encoding`. Some libraries (e.g. WTForms) want a + unicode dictionary. 
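(A usage sketch, not part of the diff, of the MultiDict semantics implemented above: item assignment appends, plain access returns the newest value, and `get` supports index selection and type coercion)::

    from bottle import MultiDict

    md = MultiDict()
    md['page'] = '1'
    md['page'] = '2'                      # __setitem__ appends, never replaces
    assert md['page'] == '2'              # plain access yields the newest value
    assert md.get('page', type=int) == 2  # optional type coercion
    assert md.get('page', index=0) == '1' # or pick an older value by index
    assert list(md.allitems()) == [('page', '1'), ('page', '2')]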
'''
+ copy = FormsDict()
+ enc = copy.input_encoding = encoding or self.input_encoding
+ copy.recode_unicode = False
+ for key, value in self.allitems():
+ copy.append(self._fix(key, enc), self._fix(value, enc))
+ return copy
 def getunicode(self, name, default=None, encoding=None):
- value, enc = self.get(name, default), encoding or self.input_encoding
 try:
- if isinstance(value, bytes): # Python 2 WSGI
- return value.decode(enc)
- elif isinstance(value, unicode): # Python 3 WSGI
- return value.encode('latin1').decode(enc)
- return value
- except UnicodeError, e:
+ return self._fix(self[name], encoding)
+ except (UnicodeError, KeyError):
 return default
- def __getattr__(self, name): return self.getunicode(name, default=u'')
+ def __getattr__(self, name, default=unicode()):
+ # Without this guard, pickle generates a cryptic TypeError:
+ if name.startswith('__') and name.endswith('__'):
+ return super(FormsDict, self).__getattr__(name)
+ return self.getunicode(name, default=default)
 class HeaderDict(MultiDict):
@@ -1666,7 +1819,7 @@ class HeaderDict(MultiDict):
 def get(self, key, default=None, index=-1):
 return MultiDict.get(self, _hkey(key), default, index)
 def filter(self, names):
- for name in map(_hkey, names):
+ for name in [_hkey(n) for n in names]:
 if name in self.dict:
 del self.dict[name]
@@ -1682,7 +1835,7 @@ class WSGIHeaderDict(DictMixin):
 Currently PEP 333, 444 and 3333 are supported. (PEP 444 is the only
 one that uses non-native strings.)
 '''
- #: List of keys that do not have a 'HTTP_' prefix.
+ #: List of keys that do not have a ``HTTP_`` prefix.
 cgikeys = ('CONTENT_TYPE', 'CONTENT_LENGTH')
 def __init__(self, environ):
@@ -1749,7 +1902,7 @@ class ConfigDict(dict):
 if key in self: del self[key]
 def __call__(self, *a, **ka):
- for key, value in dict(*a, **ka).iteritems(): setattr(self, key, value)
+ for key, value in dict(*a, **ka).items(): setattr(self, key, value)
 return self
@@ -1770,17 +1923,103 @@ class AppStack(list):
 class WSGIFileWrapper(object):
- def __init__(self, fp, buffer_size=1024*64):
- self.fp, self.buffer_size = fp, buffer_size
- for attr in ('fileno', 'close', 'read', 'readlines'):
- if hasattr(fp, attr): setattr(self, attr, getattr(fp, attr))
+ def __init__(self, fp, buffer_size=1024*64):
+ self.fp, self.buffer_size = fp, buffer_size
+ for attr in ('fileno', 'close', 'read', 'readlines', 'tell', 'seek'):
+ if hasattr(fp, attr): setattr(self, attr, getattr(fp, attr))
+
+ def __iter__(self):
+ buff, read = self.buffer_size, self.read
+ while True:
+ part = read(buff)
+ if not part: return
+ yield part
+
+
+class ResourceManager(object):
+ ''' This class manages a list of search paths and helps to find and open
+ application-bound resources (files).
+
+ :param base: default value for :meth:`add_path` calls.
+ :param opener: callable used to open resources.
+ :param cachemode: controls which lookups are cached. One of 'all',
+ 'found' or 'none'.
+ '''
+
+ def __init__(self, base='./', opener=open, cachemode='all'):
+ self.opener = opener
+ self.base = base
+ self.cachemode = cachemode
- def __iter__(self):
- read, buff = self.fp.read, self.buffer_size
- while True:
- part = read(buff)
- if not part: break
- yield part
+ #: A list of search paths. See :meth:`add_path` for details.
+ self.path = []
+ #: A cache for resolved paths. ``res.cache.clear()`` clears the cache.
+ self.cache = {}
+
+ def add_path(self, path, base=None, index=None, create=False):
+ ''' Add a new path to the list of search paths. Return False if the
+ path does not exist.
+
+ :param path: The new search path. Relative paths are turned into
+ an absolute and normalized form. If the path looks like a file
+ (not ending in `/`), the filename is stripped off.
+ :param base: Path used to absolutize relative search paths.
+ Defaults to :attr:`base` which defaults to ``os.getcwd()``.
+ :param index: Position within the list of search paths. Defaults
+ to last index (appends to the list).
+
+ The `base` parameter makes it easy to reference files installed
+ along with a python module or package::
+
+ res.add_path('./resources/', __file__)
+ '''
+ base = os.path.abspath(os.path.dirname(base or self.base))
+ path = os.path.abspath(os.path.join(base, os.path.dirname(path)))
+ path += os.sep
+ if path in self.path:
+ self.path.remove(path)
+ if create and not os.path.isdir(path):
+ os.makedirs(path)
+ if index is None:
+ self.path.append(path)
+ else:
+ self.path.insert(index, path)
+ self.cache.clear()
+ return os.path.exists(path)
+
+ def __iter__(self):
+ ''' Iterate over all existing files in all registered paths. '''
+ search = self.path[:]
+ while search:
+ path = search.pop()
+ if not os.path.isdir(path): continue
+ for name in os.listdir(path):
+ full = os.path.join(path, name)
+ if os.path.isdir(full): search.append(full)
+ else: yield full
+
+ def lookup(self, name):
+ ''' Search for a resource and return an absolute file path, or `None`.
+
+ The :attr:`path` list is searched in order. The first match is
+ returned. Symlinks are followed. The result is cached to speed up
+ future lookups. '''
+ if name not in self.cache or DEBUG:
+ for path in self.path:
+ fpath = os.path.join(path, name)
+ if os.path.isfile(fpath):
+ if self.cachemode in ('all', 'found'):
+ self.cache[name] = fpath
+ return fpath
+ if self.cachemode == 'all':
+ self.cache[name] = None
+ return self.cache[name]
+
+ def open(self, name, mode='r', *args, **kwargs):
+ ''' Find a resource and return a file object, or raise IOError. '''
+ fname = self.lookup(name)
+ if not fname: raise IOError("Resource %r not found." % name)
+ return self.opener(fname, mode=mode, *args, **kwargs)
@@ -1803,7 +2042,20 @@ def redirect(url, code=None):
 if code is None:
 code = 303 if request.get('SERVER_PROTOCOL') == "HTTP/1.1" else 302
 location = urljoin(request.url, url)
- raise HTTPResponse("", status=code, header=dict(Location=location))
+ res = HTTPResponse("", status=code, Location=location)
+ if response._cookies:
+ res._cookies = response._cookies
+ raise res
+
+
+def _file_iter_range(fp, offset, bytes, maxread=1024*1024):
+ ''' Yield chunks from a range in a file.
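(A short usage sketch for the ResourceManager class added above; the path and file name are hypothetical)::

    from bottle import ResourceManager

    rm = ResourceManager()
    rm.add_path('./static/')        # relative paths are absolutized and normalized
    fname = rm.lookup('logo.png')   # first hit across all search paths, or None
    if fname:
        fp = rm.open('logo.png')    # same lookup, but returns an open file object
        data = fp.read()
        fp.close()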
No chunk is bigger than maxread.''' + fp.seek(offset) + while bytes > 0: + part = fp.read(min(bytes, maxread)) + if not part: break + bytes -= len(part) + yield part def static_file(filename, root, mimetype='auto', download=False): @@ -1814,7 +2066,7 @@ def static_file(filename, root, mimetype='auto', download=False): """ root = os.path.abspath(root) + os.sep filename = os.path.abspath(os.path.join(root, filename.strip('/\\'))) - header = dict() + headers = dict() if not filename.startswith(root): return HTTPError(403, "Access denied.") @@ -1825,29 +2077,41 @@ def static_file(filename, root, mimetype='auto', download=False): if mimetype == 'auto': mimetype, encoding = mimetypes.guess_type(filename) - if mimetype: header['Content-Type'] = mimetype - if encoding: header['Content-Encoding'] = encoding + if mimetype: headers['Content-Type'] = mimetype + if encoding: headers['Content-Encoding'] = encoding elif mimetype: - header['Content-Type'] = mimetype + headers['Content-Type'] = mimetype if download: download = os.path.basename(filename if download == True else download) - header['Content-Disposition'] = 'attachment; filename="%s"' % download + headers['Content-Disposition'] = 'attachment; filename="%s"' % download stats = os.stat(filename) - header['Content-Length'] = stats.st_size + headers['Content-Length'] = clen = stats.st_size lm = time.strftime("%a, %d %b %Y %H:%M:%S GMT", time.gmtime(stats.st_mtime)) - header['Last-Modified'] = lm + headers['Last-Modified'] = lm ims = request.environ.get('HTTP_IF_MODIFIED_SINCE') if ims: ims = parse_date(ims.split(";")[0].strip()) if ims is not None and ims >= int(stats.st_mtime): - header['Date'] = time.strftime("%a, %d %b %Y %H:%M:%S GMT", time.gmtime()) - return HTTPResponse(status=304, header=header) + headers['Date'] = time.strftime("%a, %d %b %Y %H:%M:%S GMT", time.gmtime()) + return HTTPResponse(status=304, **headers) body = '' if request.method == 'HEAD' else open(filename, 'rb') - return HTTPResponse(body, header=header) + + headers["Accept-Ranges"] = "bytes" + ranges = request.environ.get('HTTP_RANGE') + if 'HTTP_RANGE' in request.environ: + ranges = list(parse_range_header(request.environ['HTTP_RANGE'], clen)) + if not ranges: + return HTTPError(416, "Requested Range Not Satisfiable") + offset, end = ranges[0] + headers["Content-Range"] = "bytes %d-%d/%d" % (offset, end-1, clen) + headers["Content-Length"] = str(end-offset) + if body: body = _file_iter_range(body, offset, end-offset) + return HTTPResponse(body, status=206, **headers) + return HTTPResponse(body, **headers) @@ -1880,15 +2144,42 @@ def parse_auth(header): try: method, data = header.split(None, 1) if method.lower() == 'basic': - #TODO: Add 2to3 save base64[encode/decode] functions. user, pwd = touni(base64.b64decode(tob(data))).split(':',1) return user, pwd except (KeyError, ValueError): return None +def parse_range_header(header, maxlen=0): + ''' Yield (start, end) ranges parsed from a HTTP Range header. Skip + unsatisfiable ranges. 
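(The three Range cases handled above, traced by hand for a 1000-byte file; the helper below only mirrors the parsing logic for illustration and is not part of the diff)::

    def byte_range(start, end, clen):
        # Returns (offset, end) with an exclusive end index, as used to
        # build the 'bytes %d-%d/%d' Content-Range header above.
        if not start:                    # 'bytes=-100'  -> final 100 bytes
            return max(0, clen - int(end)), clen
        if not end:                      # 'bytes=100-'  -> offset 100 to EOF
            return int(start), clen
        return int(start), min(int(end) + 1, clen)   # inclusive on the wire

    assert byte_range('', '100', 1000) == (900, 1000)
    assert byte_range('100', '', 1000) == (100, 1000)
    assert byte_range('100', '200', 1000) == (100, 201)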
The end index is non-inclusive.'''
+ if not header or header[:6] != 'bytes=': return
+ ranges = [r.split('-', 1) for r in header[6:].split(',') if '-' in r]
+ for start, end in ranges:
+ try:
+ if not start: # bytes=-100 -> last 100 bytes
+ start, end = max(0, maxlen-int(end)), maxlen
+ elif not end: # bytes=100- -> all but the first 100 bytes
+ start, end = int(start), maxlen
+ else: # bytes=100-200 -> bytes 100-200 (inclusive)
+ start, end = int(start), min(int(end)+1, maxlen)
+ if 0 <= start < end <= maxlen:
+ yield start, end
+ except ValueError:
+ pass
+
+def _parse_qsl(qs):
+ r = []
+ for pair in qs.replace(';','&').split('&'):
+ if not pair: continue
+ nv = pair.split('=', 1)
+ if len(nv) != 2: nv.append('')
+ key = urlunquote(nv[0].replace('+', ' '))
+ value = urlunquote(nv[1].replace('+', ' '))
+ r.append((key, value))
+ return r
 def _lscmp(a, b):
- ''' Compares two strings in a cryptographically save way:
+ ''' Compares two strings in a cryptographically safe way:
 Runtime is not affected by length of common prefix. '''
 return not sum(0 if x==y else 1 for x, y in zip(a, b)) and len(a) == len(b)
@@ -1988,7 +2279,7 @@ def validate(**vkargs):
 def decorator(func):
 @functools.wraps(func)
 def wrapper(*args, **kargs):
- for key, value in vkargs.iteritems():
+ for key, value in vkargs.items():
 if key not in kargs:
 abort(403, 'Missing parameter: %s' % key)
 try:
@@ -2014,6 +2305,9 @@ def auth_basic(check, realm="private", text="Access denied"):
 return decorator
+# Shortcuts for common Bottle methods.
+# They all refer to the current default application.
+
 def make_default_app_wrapper(name):
 ''' Return a callable that relays calls to the current default app. '''
 @functools.wraps(getattr(Bottle, name))
@@ -2021,12 +2315,18 @@ def make_default_app_wrapper(name):
 return getattr(app(), name)(*a, **ka)
 return wrapper
+route = make_default_app_wrapper('route')
+get = make_default_app_wrapper('get')
+post = make_default_app_wrapper('post')
+put = make_default_app_wrapper('put')
+delete = make_default_app_wrapper('delete')
+error = make_default_app_wrapper('error')
+mount = make_default_app_wrapper('mount')
+hook = make_default_app_wrapper('hook')
+install = make_default_app_wrapper('install')
+uninstall = make_default_app_wrapper('uninstall')
+url = make_default_app_wrapper('get_url')
-for name in '''route get post put delete error mount
- hook install uninstall'''.split():
- globals()[name] = make_default_app_wrapper(name)
-url = make_default_app_wrapper('get_url')
-del name
@@ -2091,6 +2391,12 @@ class CherryPyServer(ServerAdapter):
 server.stop()
+class WaitressServer(ServerAdapter):
+ def run(self, handler):
+ from waitress import serve
+ serve(handler, host=self.host, port=self.port)
+
+
 class PasteServer(ServerAdapter):
 def run(self, handler): # pragma: no cover
 from paste import httpserver
@@ -2120,8 +2426,8 @@ class FapwsServer(ServerAdapter):
 evwsgi.start(self.host, port)
 # fapws3 never releases the GIL. Complain upstream. I tried. No luck.
 if 'BOTTLE_CHILD' in os.environ and not self.quiet:
- print "WARNING: Auto-reloading does not work with Fapws3."
- print " (Fapws3 breaks python thread support)"
+ _stderr("WARNING: Auto-reloading does not work with Fapws3.\n")
+ _stderr(" (Fapws3 breaks python thread support)\n")
 evwsgi.set_base_module(base)
 def app(environ, start_response):
 environ['wsgi.multiprocess'] = False
@@ -2178,16 +2484,17 @@ class DieselServer(ServerAdapter):
 class GeventServer(ServerAdapter):
 """ Untested.
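(Typical usage of the module-level shortcuts defined above, which relay to the current default application; illustration only)::

    from bottle import route, run

    @route('/hello/<name>')              # relayed to the default app's route()
    def hello(name):
        return 'Hello %s!' % name

    # run(host='localhost', port=8080)   # uncomment to actually serve requests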
Options: - * `monkey` (default: True) fixes the stdlib to use greenthreads. * `fast` (default: False) uses libevent's http server, but has some issues: No streaming, no pipelining, no SSL. """ def run(self, handler): - from gevent import wsgi as wsgi_fast, pywsgi, monkey, local - if self.options.get('monkey', True): - if not threading.local is local.local: monkey.patch_all() - wsgi = wsgi_fast if self.options.get('fast') else pywsgi - wsgi.WSGIServer((self.host, self.port), handler).serve_forever() + from gevent import wsgi, pywsgi, local + if not isinstance(_lctx, local.local): + msg = "Bottle requires gevent.monkey.patch_all() (before import)" + raise RuntimeError(msg) + if not self.options.get('fast'): wsgi = pywsgi + log = None if self.quiet else 'default' + wsgi.WSGIServer((self.host, self.port), handler, log=log).serve_forever() class GunicornServer(ServerAdapter): @@ -2212,7 +2519,12 @@ class EventletServer(ServerAdapter): """ Untested """ def run(self, handler): from eventlet import wsgi, listen - wsgi.server(listen((self.host, self.port)), handler) + try: + wsgi.server(listen((self.host, self.port)), handler, + log_output=(not self.quiet)) + except TypeError: + # Fallback, if we have old version of eventlet + wsgi.server(listen((self.host, self.port)), handler) class RocketServer(ServerAdapter): @@ -2232,7 +2544,7 @@ class BjoernServer(ServerAdapter): class AutoServer(ServerAdapter): """ Untested. """ - adapters = [PasteServer, CherryPyServer, TwistedServer, WSGIRefServer] + adapters = [WaitressServer, PasteServer, TwistedServer, CherryPyServer, WSGIRefServer] def run(self, handler): for sa in self.adapters: try: @@ -2244,6 +2556,7 @@ server_names = { 'cgi': CGIServer, 'flup': FlupFCGIServer, 'wsgiref': WSGIRefServer, + 'waitress': WaitressServer, 'cherrypy': CherryPyServer, 'paste': PasteServer, 'fapws3': FapwsServer, @@ -2303,8 +2616,10 @@ def load_app(target): default_app.remove(tmp) # Remove the temporary added default application NORUN = nr_old +_debug = debug def run(app=None, server='wsgiref', host='127.0.0.1', port=8080, - interval=1, reloader=False, quiet=False, plugins=None, **kargs): + interval=1, reloader=False, quiet=False, plugins=None, + debug=False, **kargs): """ Start a server instance. This method blocks until the server terminates. :param app: WSGI application or target string supported by @@ -2324,6 +2639,7 @@ def run(app=None, server='wsgiref', host='127.0.0.1', port=8080, if NORUN: return if reloader and not os.environ.get('BOTTLE_CHILD'): try: + lockfile = None fd, lockfile = tempfile.mkstemp(prefix='bottle.', suffix='.lock') os.close(fd) # We only need this file to exist. 
We never write to it while os.path.exists(lockfile): @@ -2345,9 +2661,8 @@ def run(app=None, server='wsgiref', host='127.0.0.1', port=8080, os.unlink(lockfile) return - stderr = sys.stderr.write - try: + _debug(debug) app = app or default_app() if isinstance(app, basestring): app = load_app(app) @@ -2368,9 +2683,9 @@ def run(app=None, server='wsgiref', host='127.0.0.1', port=8080, server.quiet = server.quiet or quiet if not server.quiet: - stderr("Bottle server starting up (using %s)...\n" % repr(server)) - stderr("Listening on http://%s:%d/\n" % (server.host, server.port)) - stderr("Hit Ctrl-C to quit.\n\n") + _stderr("Bottle v%s server starting up (using %s)...\n" % (__version__, repr(server))) + _stderr("Listening on http://%s:%d/\n" % (server.host, server.port)) + _stderr("Hit Ctrl-C to quit.\n\n") if reloader: lockfile = os.environ.get('BOTTLE_LOCKFILE') @@ -2383,12 +2698,15 @@ def run(app=None, server='wsgiref', host='127.0.0.1', port=8080, server.run(app) except KeyboardInterrupt: pass - except (SyntaxError, ImportError): + except (SystemExit, MemoryError): + raise + except: if not reloader: raise - if not getattr(server, 'quiet', False): print_exc() + if not getattr(server, 'quiet', quiet): + print_exc() + time.sleep(interval) sys.exit(3) - finally: - if not getattr(server, 'quiet', False): stderr('Shutdown...\n') + class FileCheckerThread(threading.Thread): @@ -2406,7 +2724,7 @@ class FileCheckerThread(threading.Thread): mtime = lambda path: os.stat(path).st_mtime files = dict() - for module in sys.modules.values(): + for module in list(sys.modules.values()): path = getattr(module, '__file__', '') if path[-4:] in ('.pyo', '.pyc'): path = path[:-1] if path and exists(path): files[path] = mtime(path) @@ -2416,20 +2734,20 @@ class FileCheckerThread(threading.Thread): or mtime(self.lockfile) < time.time() - self.interval - 5: self.status = 'error' thread.interrupt_main() - for path, lmtime in files.iteritems(): + for path, lmtime in list(files.items()): if not exists(path) or mtime(path) > lmtime: self.status = 'reload' thread.interrupt_main() break time.sleep(self.interval) - + def __enter__(self): self.start() - + def __exit__(self, exc_type, exc_val, exc_tb): if not self.status: self.status = 'exit' # silent exit self.join() - return issubclass(exc_type, KeyboardInterrupt) + return exc_type is not None and issubclass(exc_type, KeyboardInterrupt) @@ -2465,7 +2783,7 @@ class BaseTemplate(object): self.name = name self.source = source.read() if hasattr(source, 'read') else source self.filename = source.filename if hasattr(source, 'filename') else None - self.lookup = map(os.path.abspath, lookup) + self.lookup = [os.path.abspath(x) for x in lookup] self.encoding = encoding self.settings = self.settings.copy() # Copy from class variable self.settings.update(settings) # Apply @@ -2481,11 +2799,19 @@ class BaseTemplate(object): def search(cls, name, lookup=[]): """ Search name in all directories specified in lookup. First without, then with common extensions. Return first hit. 
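(A minimal sketch of the containment check that the rewritten search() below relies on to keep template lookups inside the search path; names are illustrative)::

    import os

    def is_contained(base, name):
        # Resolve both paths, then require the candidate to stay under base.
        base = os.path.abspath(base) + os.sep
        fname = os.path.abspath(os.path.join(base, name))
        return fname.startswith(base)

    assert is_contained('./views', 'index.tpl')
    assert not is_contained('./views', '../../etc/passwd')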
""" - if os.path.isfile(name): return name + if not lookup: + depr('The template lookup path list should not be empty.') + lookup = ['.'] + + if os.path.isabs(name) and os.path.isfile(name): + depr('Absolute template path names are deprecated.') + return os.path.abspath(name) + for spath in lookup: - fname = os.path.join(spath, name) - if os.path.isfile(fname): - return fname + spath = os.path.abspath(spath) + os.sep + fname = os.path.abspath(os.path.join(spath, name)) + if not fname.startswith(spath): continue + if os.path.isfile(fname): return fname for ext in cls.extensions: if os.path.isfile('%s.%s' % (fname, ext)): return '%s.%s' % (fname, ext) @@ -2577,16 +2903,17 @@ class Jinja2Template(BaseTemplate): def loader(self, name): fname = self.search(name, self.lookup) - if fname: - with open(fname, "rb") as f: - return f.read().decode(self.encoding) + if not fname: return + with open(fname, "rb") as f: + return f.read().decode(self.encoding) class SimpleTALTemplate(BaseTemplate): - ''' Untested! ''' + ''' Deprecated, do not use. ''' def prepare(self, **options): + depr('The SimpleTAL template handler is deprecated'\ + ' and will be removed in 0.12') from simpletal import simpleTAL - # TODO: add option to load METAL files during render if self.source: self.tpl = simpleTAL.compileHTMLTemplate(self.source) else: @@ -2596,7 +2923,6 @@ class SimpleTALTemplate(BaseTemplate): def render(self, *args, **kwargs): from simpletal import simpleTALES for dictarg in args: kwargs.update(dictarg) - # TODO: maybe reuse a context instead of always creating one context = simpleTALES.Context() for k,v in self.defaults.items(): context.addGlobal(k, v) @@ -2684,13 +3010,13 @@ class SimpleTemplate(BaseTemplate): for line in template.splitlines(True): lineno += 1 - line = line if isinstance(line, unicode)\ - else unicode(line, encoding=self.encoding) + line = touni(line, self.encoding) + sline = line.lstrip() if lineno <= 2: - m = re.search(r"%.*coding[:=]\s*([-\w\.]+)", line) + m = re.match(r"%\s*#.*coding[:=]\s*([-\w.]+)", sline) if m: self.encoding = m.group(1) if m: line = line.replace('coding','coding (removed)') - if line.strip()[:2].count('%') == 1: + if sline and sline[0] == '%' and sline[:2] != '%%': line = line.split('%',1)[1].lstrip() # Full line following the % cline = self.split_comment(line).strip() cmd = re.split(r'[^a-zA-Z0-9_]', cline)[0] @@ -2768,21 +3094,22 @@ def template(*args, **kwargs): or directly (as keyword arguments). 
''' tpl = args[0] if args else None - template_adapter = kwargs.pop('template_adapter', SimpleTemplate) - if tpl not in TEMPLATES or DEBUG: + adapter = kwargs.pop('template_adapter', SimpleTemplate) + lookup = kwargs.pop('template_lookup', TEMPLATE_PATH) + tplid = (id(lookup), tpl) + if tplid not in TEMPLATES or DEBUG: settings = kwargs.pop('template_settings', {}) - lookup = kwargs.pop('template_lookup', TEMPLATE_PATH) - if isinstance(tpl, template_adapter): - TEMPLATES[tpl] = tpl - if settings: TEMPLATES[tpl].prepare(**settings) + if isinstance(tpl, adapter): + TEMPLATES[tplid] = tpl + if settings: TEMPLATES[tplid].prepare(**settings) elif "\n" in tpl or "{" in tpl or "%" in tpl or '$' in tpl: - TEMPLATES[tpl] = template_adapter(source=tpl, lookup=lookup, **settings) + TEMPLATES[tplid] = adapter(source=tpl, lookup=lookup, **settings) else: - TEMPLATES[tpl] = template_adapter(name=tpl, lookup=lookup, **settings) - if not TEMPLATES[tpl]: + TEMPLATES[tplid] = adapter(name=tpl, lookup=lookup, **settings) + if not TEMPLATES[tplid]: abort(500, 'Template (%s) not found' % tpl) for dictarg in args[1:]: kwargs.update(dictarg) - return TEMPLATES[tpl].render(kwargs) + return TEMPLATES[tplid].render(kwargs) mako_template = functools.partial(template, template_adapter=MakoTemplate) cheetah_template = functools.partial(template, template_adapter=CheetahTemplate) @@ -2839,17 +3166,16 @@ HTTP_CODES[428] = "Precondition Required" HTTP_CODES[429] = "Too Many Requests" HTTP_CODES[431] = "Request Header Fields Too Large" HTTP_CODES[511] = "Network Authentication Required" -_HTTP_STATUS_LINES = dict((k, '%d %s'%(k,v)) for (k,v) in HTTP_CODES.iteritems()) +_HTTP_STATUS_LINES = dict((k, '%d %s'%(k,v)) for (k,v) in HTTP_CODES.items()) #: The default template used for error pages. Override with @error() ERROR_PAGE_TEMPLATE = """ -%try: - %from bottle import DEBUG, HTTP_CODES, request, touni - %status_name = HTTP_CODES.get(e.status, 'Unknown').title() +%%try: + %%from %s import DEBUG, HTTP_CODES, request, touni <!DOCTYPE HTML PUBLIC "-//IETF//DTD HTML 2.0//EN"> <html> <head> - <title>Error {{e.status}}: {{status_name}}</title> + <title>Error: {{e.status}}</title> <style type="text/css"> html {background-color: #eee; font-family: sans;} body {background-color: #fff; border: 1px solid #ddd; @@ -2858,31 +3184,34 @@ ERROR_PAGE_TEMPLATE = """ </style> </head> <body> - <h1>Error {{e.status}}: {{status_name}}</h1> + <h1>Error: {{e.status}}</h1> <p>Sorry, the requested URL <tt>{{repr(request.url)}}</tt> caused an error:</p> - <pre>{{e.output}}</pre> - %if DEBUG and e.exception: + <pre>{{e.body}}</pre> + %%if DEBUG and e.exception: <h2>Exception:</h2> <pre>{{repr(e.exception)}}</pre> - %end - %if DEBUG and e.traceback: + %%end + %%if DEBUG and e.traceback: <h2>Traceback:</h2> <pre>{{e.traceback}}</pre> - %end + %%end </body> </html> -%except ImportError: +%%except ImportError: <b>ImportError:</b> Could not generate the error page. Please add bottle to the import path. -%end -""" +%%end +""" % __name__ -#: A thread-safe instance of :class:`Request` representing the `current` request. -request = Request() +#: A thread-safe instance of :class:`LocalRequest`. If accessed from within a +#: request callback, this instance always refers to the *current* request +#: (even on a multithreaded server). +request = LocalRequest() -#: A thread-safe instance of :class:`Response` used to build the HTTP response. -response = Response() +#: A thread-safe instance of :class:`LocalResponse`. 
It is used to change the +#: HTTP response for the *current* request. +response = LocalResponse() #: A thread-safe namespace. Not used by Bottle. local = threading.local() @@ -2894,29 +3223,29 @@ app.push() #: A virtual package that redirects import statements. #: Example: ``import bottle.ext.sqlite`` actually imports `bottle_sqlite`. -ext = _ImportRedirect(__name__+'.ext', 'bottle_%s').module +ext = _ImportRedirect('bottle.ext' if __name__ == '__main__' else __name__+".ext", 'bottle_%s').module if __name__ == '__main__': opt, args, parser = _cmd_options, _cmd_args, _cmd_parser if opt.version: - print 'Bottle', __version__; sys.exit(0) + _stdout('Bottle %s\n'%__version__) + sys.exit(0) if not args: parser.print_help() - print '\nError: No application specified.\n' + _stderr('\nError: No application specified.\n') sys.exit(1) - try: - sys.path.insert(0, '.') - sys.modules.setdefault('bottle', sys.modules['__main__']) - except (AttributeError, ImportError), e: - parser.error(e.args[0]) + sys.path.insert(0, '.') + sys.modules.setdefault('bottle', sys.modules['__main__']) + + host, port = (opt.bind or 'localhost'), 8080 + if ':' in host: + host, port = host.rsplit(':', 1) + + run(args[0], host=host, port=port, server=opt.server, + reloader=opt.reload, plugins=opt.plugin, debug=opt.debug) + - if opt.bind and ':' in opt.bind: - host, port = opt.bind.rsplit(':', 1) - else: - host, port = (opt.bind or 'localhost'), 8080 - debug(opt.debug) - run(args[0], host=host, port=port, server=opt.server, reloader=opt.reload, plugins=opt.plugin) # THE END diff --git a/module/lib/feedparser.py b/module/lib/feedparser.py deleted file mode 100644 index a746ed8f5..000000000 --- a/module/lib/feedparser.py +++ /dev/null @@ -1,3885 +0,0 @@ -#!/usr/bin/env python -"""Universal feed parser - -Handles RSS 0.9x, RSS 1.0, RSS 2.0, CDF, Atom 0.3, and Atom 1.0 feeds - -Visit http://feedparser.org/ for the latest version -Visit http://feedparser.org/docs/ for the latest documentation - -Required: Python 2.4 or later -Recommended: CJKCodecs and iconv_codec <http://cjkpython.i18n.org/> -""" - -__version__ = "5.0" -__license__ = """Copyright (c) 2002-2008, Mark Pilgrim, All rights reserved. - -Redistribution and use in source and binary forms, with or without modification, -are permitted provided that the following conditions are met: - -* Redistributions of source code must retain the above copyright notice, - this list of conditions and the following disclaimer. -* Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 'AS IS' -AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE -LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -POSSIBILITY OF SUCH DAMAGE.""" -__author__ = "Mark Pilgrim <http://diveintomark.org/>" -__contributors__ = ["Jason Diamond <http://injektilo.org/>", - "John Beimler <http://john.beimler.org/>", - "Fazal Majid <http://www.majid.info/mylos/weblog/>", - "Aaron Swartz <http://aaronsw.com/>", - "Kevin Marks <http://epeus.blogspot.com/>", - "Sam Ruby <http://intertwingly.net/>", - "Ade Oshineye <http://blog.oshineye.com/>", - "Martin Pool <http://sourcefrog.net/>", - "Kurt McKee <http://kurtmckee.org/>"] -_debug = 0 - -# HTTP "User-Agent" header to send to servers when downloading feeds. -# If you are embedding feedparser in a larger application, you should -# change this to your application name and URL. -USER_AGENT = "UniversalFeedParser/%s +http://feedparser.org/" % __version__ - -# HTTP "Accept" header to send to servers when downloading feeds. If you don't -# want to send an Accept header, set this to None. -ACCEPT_HEADER = "application/atom+xml,application/rdf+xml,application/rss+xml,application/x-netcdf,application/xml;q=0.9,text/xml;q=0.2,*/*;q=0.1" - -# List of preferred XML parsers, by SAX driver name. These will be tried first, -# but if they're not installed, Python will keep searching through its own list -# of pre-installed parsers until it finds one that supports everything we need. -PREFERRED_XML_PARSERS = ["drv_libxml2"] - -# If you want feedparser to automatically run HTML markup through HTML Tidy, set -# this to 1. Requires mxTidy <http://www.egenix.com/files/python/mxTidy.html> -# or utidylib <http://utidylib.berlios.de/>. -TIDY_MARKUP = 0 - -# List of Python interfaces for HTML Tidy, in order of preference. Only useful -# if TIDY_MARKUP = 1 -PREFERRED_TIDY_INTERFACES = ["uTidy", "mxTidy"] - -# If you want feedparser to automatically resolve all relative URIs, set this -# to 1. -RESOLVE_RELATIVE_URIS = 1 - -# If you want feedparser to automatically sanitize all potentially unsafe -# HTML content, set this to 1. 
-SANITIZE_HTML = 1 - -# ---------- Python 3 modules (make it work if possible) ---------- -try: - import rfc822 -except ImportError: - from email import _parseaddr as rfc822 - -try: - # Python 3.1 introduces bytes.maketrans and simultaneously - # deprecates string.maketrans; use bytes.maketrans if possible - _maketrans = bytes.maketrans -except (NameError, AttributeError): - import string - _maketrans = string.maketrans - -# base64 support for Atom feeds that contain embedded binary data -try: - import base64, binascii - # Python 3.1 deprecates decodestring in favor of decodebytes - _base64decode = getattr(base64, 'decodebytes', base64.decodestring) -except: - base64 = binascii = None - -def _s2bytes(s): - # Convert a UTF-8 str to bytes if the interpreter is Python 3 - try: - return bytes(s, 'utf8') - except (NameError, TypeError): - # In Python 2.5 and below, bytes doesn't exist (NameError) - # In Python 2.6 and above, bytes and str are the same (TypeError) - return s - -def _l2bytes(l): - # Convert a list of ints to bytes if the interpreter is Python 3 - try: - if bytes is not str: - # In Python 2.6 and above, this call won't raise an exception - # but it will return bytes([65]) as '[65]' instead of 'A' - return bytes(l) - raise NameError - except NameError: - return ''.join(map(chr, l)) - -# If you want feedparser to allow all URL schemes, set this to () -# List culled from Python's urlparse documentation at: -# http://docs.python.org/library/urlparse.html -# as well as from "URI scheme" at Wikipedia: -# https://secure.wikimedia.org/wikipedia/en/wiki/URI_scheme -# Many more will likely need to be added! -ACCEPTABLE_URI_SCHEMES = ( - 'file', 'ftp', 'gopher', 'h323', 'hdl', 'http', 'https', 'imap', 'mailto', - 'mms', 'news', 'nntp', 'prospero', 'rsync', 'rtsp', 'rtspu', 'sftp', - 'shttp', 'sip', 'sips', 'snews', 'svn', 'svn+ssh', 'telnet', 'wais', - # Additional common-but-unofficial schemes - 'aim', 'callto', 'cvs', 'facetime', 'feed', 'git', 'gtalk', 'irc', 'ircs', - 'irc6', 'itms', 'mms', 'msnim', 'skype', 'ssh', 'smb', 'svn', 'ymsg', -) -#ACCEPTABLE_URI_SCHEMES = () - -# ---------- required modules (should come with any Python distribution) ---------- -import sgmllib, re, sys, copy, urlparse, time, types, cgi, urllib, urllib2, datetime -try: - from io import BytesIO as _StringIO -except ImportError: - try: - from cStringIO import StringIO as _StringIO - except: - from StringIO import StringIO as _StringIO - -# ---------- optional modules (feedparser will work without these, but with reduced functionality) ---------- - -# gzip is included with most Python distributions, but may not be available if you compiled your own -try: - import gzip -except: - gzip = None -try: - import zlib -except: - zlib = None - -# If a real XML parser is available, feedparser will attempt to use it. feedparser has -# been tested with the built-in SAX parser, PyXML, and libxml2. On platforms where the -# Python distribution does not come with an XML parser (such as Mac OS X 10.2 and some -# versions of FreeBSD), feedparser will quietly fall back on regex-based parsing. 
-try: - import xml.sax - xml.sax.make_parser(PREFERRED_XML_PARSERS) # test for valid parsers - from xml.sax.saxutils import escape as _xmlescape - _XML_AVAILABLE = 1 -except: - _XML_AVAILABLE = 0 - def _xmlescape(data,entities={}): - data = data.replace('&', '&') - data = data.replace('>', '>') - data = data.replace('<', '<') - for char, entity in entities: - data = data.replace(char, entity) - return data - -# cjkcodecs and iconv_codec provide support for more character encodings. -# Both are available from http://cjkpython.i18n.org/ -try: - import cjkcodecs.aliases -except: - pass -try: - import iconv_codec -except: - pass - -# chardet library auto-detects character encodings -# Download from http://chardet.feedparser.org/ -try: - import chardet - if _debug: - import chardet.constants - chardet.constants._debug = 1 -except: - chardet = None - -# reversable htmlentitydefs mappings for Python 2.2 -try: - from htmlentitydefs import name2codepoint, codepoint2name -except: - import htmlentitydefs - name2codepoint={} - codepoint2name={} - for (name,codepoint) in htmlentitydefs.entitydefs.iteritems(): - if codepoint.startswith('&#'): codepoint=unichr(int(codepoint[2:-1])) - name2codepoint[name]=ord(codepoint) - codepoint2name[ord(codepoint)]=name - -# BeautifulSoup parser used for parsing microformats from embedded HTML content -# http://www.crummy.com/software/BeautifulSoup/ -# feedparser is tested with BeautifulSoup 3.0.x, but it might work with the -# older 2.x series. If it doesn't, and you can figure out why, I'll accept a -# patch and modify the compatibility statement accordingly. -try: - import BeautifulSoup -except: - BeautifulSoup = None - -# ---------- don't touch these ---------- -class ThingsNobodyCaresAboutButMe(Exception): pass -class CharacterEncodingOverride(ThingsNobodyCaresAboutButMe): pass -class CharacterEncodingUnknown(ThingsNobodyCaresAboutButMe): pass -class NonXMLContentType(ThingsNobodyCaresAboutButMe): pass -class UndeclaredNamespace(Exception): pass - -sgmllib.tagfind = re.compile('[a-zA-Z][-_.:a-zA-Z0-9]*') -sgmllib.special = re.compile('<!') -sgmllib.charref = re.compile('&#(\d+|[xX][0-9a-fA-F]+);') - -if sgmllib.endbracket.search(' <').start(0): - class EndBracketRegEx: - def __init__(self): - # Overriding the built-in sgmllib.endbracket regex allows the - # parser to find angle brackets embedded in element attributes. - self.endbracket = re.compile('''([^'"<>]|"[^"]*"(?=>|/|\s|\w+=)|'[^']*'(?=>|/|\s|\w+=))*(?=[<>])|.*?(?=[<>])''') - def search(self,string,index=0): - match = self.endbracket.match(string,index) - if match is not None: - # Returning a new object in the calling thread's context - # resolves a thread-safety. 
- return EndBracketMatch(match) - return None - class EndBracketMatch: - def __init__(self, match): - self.match = match - def start(self, n): - return self.match.end(n) - sgmllib.endbracket = EndBracketRegEx() - -SUPPORTED_VERSIONS = {'': 'unknown', - 'rss090': 'RSS 0.90', - 'rss091n': 'RSS 0.91 (Netscape)', - 'rss091u': 'RSS 0.91 (Userland)', - 'rss092': 'RSS 0.92', - 'rss093': 'RSS 0.93', - 'rss094': 'RSS 0.94', - 'rss20': 'RSS 2.0', - 'rss10': 'RSS 1.0', - 'rss': 'RSS (unknown version)', - 'atom01': 'Atom 0.1', - 'atom02': 'Atom 0.2', - 'atom03': 'Atom 0.3', - 'atom10': 'Atom 1.0', - 'atom': 'Atom (unknown version)', - 'cdf': 'CDF', - 'hotrss': 'Hot RSS' - } - -try: - UserDict = dict -except NameError: - # Python 2.1 does not have dict - from UserDict import UserDict - def dict(aList): - rc = {} - for k, v in aList: - rc[k] = v - return rc - -class FeedParserDict(UserDict): - keymap = {'channel': 'feed', - 'items': 'entries', - 'guid': 'id', - 'date': 'updated', - 'date_parsed': 'updated_parsed', - 'description': ['summary', 'subtitle'], - 'url': ['href'], - 'modified': 'updated', - 'modified_parsed': 'updated_parsed', - 'issued': 'published', - 'issued_parsed': 'published_parsed', - 'copyright': 'rights', - 'copyright_detail': 'rights_detail', - 'tagline': 'subtitle', - 'tagline_detail': 'subtitle_detail'} - def __getitem__(self, key): - if key == 'category': - return UserDict.__getitem__(self, 'tags')[0]['term'] - if key == 'enclosures': - norel = lambda link: FeedParserDict([(name,value) for (name,value) in link.items() if name!='rel']) - return [norel(link) for link in UserDict.__getitem__(self, 'links') if link['rel']=='enclosure'] - if key == 'license': - for link in UserDict.__getitem__(self, 'links'): - if link['rel']=='license' and link.has_key('href'): - return link['href'] - if key == 'categories': - return [(tag['scheme'], tag['term']) for tag in UserDict.__getitem__(self, 'tags')] - realkey = self.keymap.get(key, key) - if type(realkey) == types.ListType: - for k in realkey: - if UserDict.__contains__(self, k): - return UserDict.__getitem__(self, k) - if UserDict.__contains__(self, key): - return UserDict.__getitem__(self, key) - return UserDict.__getitem__(self, realkey) - - def __setitem__(self, key, value): - for k in self.keymap.keys(): - if key == k: - key = self.keymap[k] - if type(key) == types.ListType: - key = key[0] - return UserDict.__setitem__(self, key, value) - - def get(self, key, default=None): - if self.has_key(key): - return self[key] - else: - return default - - def setdefault(self, key, value): - if not self.has_key(key): - self[key] = value - return self[key] - - def has_key(self, key): - try: - return hasattr(self, key) or UserDict.__contains__(self, key) - except AttributeError: - return False - # This alias prevents the 2to3 tool from changing the semantics of the - # __contains__ function below and exhausting the maximum recursion depth - __has_key = has_key - - def __getattr__(self, key): - try: - return self.__dict__[key] - except KeyError: - pass - try: - assert not key.startswith('_') - return self.__getitem__(key) - except: - raise AttributeError, "object has no attribute '%s'" % key - - def __setattr__(self, key, value): - if key.startswith('_') or key == 'data': - self.__dict__[key] = value - else: - return self.__setitem__(key, value) - - def __contains__(self, key): - return self.__has_key(key) - -def zopeCompatibilityHack(): - global FeedParserDict - del FeedParserDict - def FeedParserDict(aDict=None): - rc = {} - if aDict: - 
rc.update(aDict) - return rc - -_ebcdic_to_ascii_map = None -def _ebcdic_to_ascii(s): - global _ebcdic_to_ascii_map - if not _ebcdic_to_ascii_map: - emap = ( - 0,1,2,3,156,9,134,127,151,141,142,11,12,13,14,15, - 16,17,18,19,157,133,8,135,24,25,146,143,28,29,30,31, - 128,129,130,131,132,10,23,27,136,137,138,139,140,5,6,7, - 144,145,22,147,148,149,150,4,152,153,154,155,20,21,158,26, - 32,160,161,162,163,164,165,166,167,168,91,46,60,40,43,33, - 38,169,170,171,172,173,174,175,176,177,93,36,42,41,59,94, - 45,47,178,179,180,181,182,183,184,185,124,44,37,95,62,63, - 186,187,188,189,190,191,192,193,194,96,58,35,64,39,61,34, - 195,97,98,99,100,101,102,103,104,105,196,197,198,199,200,201, - 202,106,107,108,109,110,111,112,113,114,203,204,205,206,207,208, - 209,126,115,116,117,118,119,120,121,122,210,211,212,213,214,215, - 216,217,218,219,220,221,222,223,224,225,226,227,228,229,230,231, - 123,65,66,67,68,69,70,71,72,73,232,233,234,235,236,237, - 125,74,75,76,77,78,79,80,81,82,238,239,240,241,242,243, - 92,159,83,84,85,86,87,88,89,90,244,245,246,247,248,249, - 48,49,50,51,52,53,54,55,56,57,250,251,252,253,254,255 - ) - _ebcdic_to_ascii_map = _maketrans( \ - _l2bytes(range(256)), _l2bytes(emap)) - return s.translate(_ebcdic_to_ascii_map) - -_cp1252 = { - unichr(128): unichr(8364), # euro sign - unichr(130): unichr(8218), # single low-9 quotation mark - unichr(131): unichr( 402), # latin small letter f with hook - unichr(132): unichr(8222), # double low-9 quotation mark - unichr(133): unichr(8230), # horizontal ellipsis - unichr(134): unichr(8224), # dagger - unichr(135): unichr(8225), # double dagger - unichr(136): unichr( 710), # modifier letter circumflex accent - unichr(137): unichr(8240), # per mille sign - unichr(138): unichr( 352), # latin capital letter s with caron - unichr(139): unichr(8249), # single left-pointing angle quotation mark - unichr(140): unichr( 338), # latin capital ligature oe - unichr(142): unichr( 381), # latin capital letter z with caron - unichr(145): unichr(8216), # left single quotation mark - unichr(146): unichr(8217), # right single quotation mark - unichr(147): unichr(8220), # left double quotation mark - unichr(148): unichr(8221), # right double quotation mark - unichr(149): unichr(8226), # bullet - unichr(150): unichr(8211), # en dash - unichr(151): unichr(8212), # em dash - unichr(152): unichr( 732), # small tilde - unichr(153): unichr(8482), # trade mark sign - unichr(154): unichr( 353), # latin small letter s with caron - unichr(155): unichr(8250), # single right-pointing angle quotation mark - unichr(156): unichr( 339), # latin small ligature oe - unichr(158): unichr( 382), # latin small letter z with caron - unichr(159): unichr( 376)} # latin capital letter y with diaeresis - -_urifixer = re.compile('^([A-Za-z][A-Za-z0-9+-.]*://)(/*)(.*?)') -def _urljoin(base, uri): - uri = _urifixer.sub(r'\1\3', uri) - try: - return urlparse.urljoin(base, uri) - except: - uri = urlparse.urlunparse([urllib.quote(part) for part in urlparse.urlparse(uri)]) - return urlparse.urljoin(base, uri) - -class _FeedParserMixin: - namespaces = {'': '', - 'http://backend.userland.com/rss': '', - 'http://blogs.law.harvard.edu/tech/rss': '', - 'http://purl.org/rss/1.0/': '', - 'http://my.netscape.com/rdf/simple/0.9/': '', - 'http://example.com/newformat#': '', - 'http://example.com/necho': '', - 'http://purl.org/echo/': '', - 'uri/of/echo/namespace#': '', - 'http://purl.org/pie/': '', - 'http://purl.org/atom/ns#': '', - 'http://www.w3.org/2005/Atom': '', - 
'http://purl.org/rss/1.0/modules/rss091#': '', - - 'http://webns.net/mvcb/': 'admin', - 'http://purl.org/rss/1.0/modules/aggregation/': 'ag', - 'http://purl.org/rss/1.0/modules/annotate/': 'annotate', - 'http://media.tangent.org/rss/1.0/': 'audio', - 'http://backend.userland.com/blogChannelModule': 'blogChannel', - 'http://web.resource.org/cc/': 'cc', - 'http://backend.userland.com/creativeCommonsRssModule': 'creativeCommons', - 'http://purl.org/rss/1.0/modules/company': 'co', - 'http://purl.org/rss/1.0/modules/content/': 'content', - 'http://my.theinfo.org/changed/1.0/rss/': 'cp', - 'http://purl.org/dc/elements/1.1/': 'dc', - 'http://purl.org/dc/terms/': 'dcterms', - 'http://purl.org/rss/1.0/modules/email/': 'email', - 'http://purl.org/rss/1.0/modules/event/': 'ev', - 'http://rssnamespace.org/feedburner/ext/1.0': 'feedburner', - 'http://freshmeat.net/rss/fm/': 'fm', - 'http://xmlns.com/foaf/0.1/': 'foaf', - 'http://www.w3.org/2003/01/geo/wgs84_pos#': 'geo', - 'http://postneo.com/icbm/': 'icbm', - 'http://purl.org/rss/1.0/modules/image/': 'image', - 'http://www.itunes.com/DTDs/PodCast-1.0.dtd': 'itunes', - 'http://example.com/DTDs/PodCast-1.0.dtd': 'itunes', - 'http://purl.org/rss/1.0/modules/link/': 'l', - 'http://search.yahoo.com/mrss': 'media', - #Version 1.1.2 of the Media RSS spec added the trailing slash on the namespace - 'http://search.yahoo.com/mrss/': 'media', - 'http://madskills.com/public/xml/rss/module/pingback/': 'pingback', - 'http://prismstandard.org/namespaces/1.2/basic/': 'prism', - 'http://www.w3.org/1999/02/22-rdf-syntax-ns#': 'rdf', - 'http://www.w3.org/2000/01/rdf-schema#': 'rdfs', - 'http://purl.org/rss/1.0/modules/reference/': 'ref', - 'http://purl.org/rss/1.0/modules/richequiv/': 'reqv', - 'http://purl.org/rss/1.0/modules/search/': 'search', - 'http://purl.org/rss/1.0/modules/slash/': 'slash', - 'http://schemas.xmlsoap.org/soap/envelope/': 'soap', - 'http://purl.org/rss/1.0/modules/servicestatus/': 'ss', - 'http://hacks.benhammersley.com/rss/streaming/': 'str', - 'http://purl.org/rss/1.0/modules/subscription/': 'sub', - 'http://purl.org/rss/1.0/modules/syndication/': 'sy', - 'http://schemas.pocketsoap.com/rss/myDescModule/': 'szf', - 'http://purl.org/rss/1.0/modules/taxonomy/': 'taxo', - 'http://purl.org/rss/1.0/modules/threading/': 'thr', - 'http://purl.org/rss/1.0/modules/textinput/': 'ti', - 'http://madskills.com/public/xml/rss/module/trackback/':'trackback', - 'http://wellformedweb.org/commentAPI/': 'wfw', - 'http://purl.org/rss/1.0/modules/wiki/': 'wiki', - 'http://www.w3.org/1999/xhtml': 'xhtml', - 'http://www.w3.org/1999/xlink': 'xlink', - 'http://www.w3.org/XML/1998/namespace': 'xml' -} - _matchnamespaces = {} - - can_be_relative_uri = ['link', 'id', 'wfw_comment', 'wfw_commentrss', 'docs', 'url', 'href', 'comments', 'icon', 'logo'] - can_contain_relative_uris = ['content', 'title', 'summary', 'info', 'tagline', 'subtitle', 'copyright', 'rights', 'description'] - can_contain_dangerous_markup = ['content', 'title', 'summary', 'info', 'tagline', 'subtitle', 'copyright', 'rights', 'description'] - html_types = ['text/html', 'application/xhtml+xml'] - - def __init__(self, baseuri=None, baselang=None, encoding='utf-8'): - if _debug: sys.stderr.write('initializing FeedParser\n') - if not self._matchnamespaces: - for k, v in self.namespaces.items(): - self._matchnamespaces[k.lower()] = v - self.feeddata = FeedParserDict() # feed-level data - self.encoding = encoding # character encoding - self.entries = [] # list of entry-level data - self.version = '' # feed 
type/version, see SUPPORTED_VERSIONS - self.namespacesInUse = {} # dictionary of namespaces defined by the feed - - # the following are used internally to track state; - # this is really out of control and should be refactored - self.infeed = 0 - self.inentry = 0 - self.incontent = 0 - self.intextinput = 0 - self.inimage = 0 - self.inauthor = 0 - self.incontributor = 0 - self.inpublisher = 0 - self.insource = 0 - self.sourcedata = FeedParserDict() - self.contentparams = FeedParserDict() - self._summaryKey = None - self.namespacemap = {} - self.elementstack = [] - self.basestack = [] - self.langstack = [] - self.baseuri = baseuri or '' - self.lang = baselang or None - self.svgOK = 0 - self.hasTitle = 0 - if baselang: - self.feeddata['language'] = baselang.replace('_','-') - - def unknown_starttag(self, tag, attrs): - if _debug: sys.stderr.write('start %s with %s\n' % (tag, attrs)) - # normalize attrs - attrs = [(k.lower(), v) for k, v in attrs] - attrs = [(k, k in ('rel', 'type') and v.lower() or v) for k, v in attrs] - # the sgml parser doesn't handle entities in attributes, but - # strict xml parsers do -- account for this difference - if isinstance(self, _LooseFeedParser): - attrs = [(k, v.replace('&', '&')) for k, v in attrs] - - # track xml:base and xml:lang - attrsD = dict(attrs) - baseuri = attrsD.get('xml:base', attrsD.get('base')) or self.baseuri - if type(baseuri) != type(u''): - try: - baseuri = unicode(baseuri, self.encoding) - except: - baseuri = unicode(baseuri, 'iso-8859-1') - # ensure that self.baseuri is always an absolute URI that - # uses a whitelisted URI scheme (e.g. not `javscript:`) - if self.baseuri: - self.baseuri = _makeSafeAbsoluteURI(self.baseuri, baseuri) or self.baseuri - else: - self.baseuri = _urljoin(self.baseuri, baseuri) - lang = attrsD.get('xml:lang', attrsD.get('lang')) - if lang == '': - # xml:lang could be explicitly set to '', we need to capture that - lang = None - elif lang is None: - # if no xml:lang is specified, use parent lang - lang = self.lang - if lang: - if tag in ('feed', 'rss', 'rdf:RDF'): - self.feeddata['language'] = lang.replace('_','-') - self.lang = lang - self.basestack.append(self.baseuri) - self.langstack.append(lang) - - # track namespaces - for prefix, uri in attrs: - if prefix.startswith('xmlns:'): - self.trackNamespace(prefix[6:], uri) - elif prefix == 'xmlns': - self.trackNamespace(None, uri) - - # track inline content - if self.incontent and self.contentparams.has_key('type') and not self.contentparams.get('type', 'xml').endswith('xml'): - if tag in ['xhtml:div', 'div']: return # typepad does this 10/2007 - # element declared itself as escaped markup, but it isn't really - self.contentparams['type'] = 'application/xhtml+xml' - if self.incontent and self.contentparams.get('type') == 'application/xhtml+xml': - if tag.find(':') <> -1: - prefix, tag = tag.split(':', 1) - namespace = self.namespacesInUse.get(prefix, '') - if tag=='math' and namespace=='http://www.w3.org/1998/Math/MathML': - attrs.append(('xmlns',namespace)) - if tag=='svg' and namespace=='http://www.w3.org/2000/svg': - attrs.append(('xmlns',namespace)) - if tag == 'svg': self.svgOK += 1 - return self.handle_data('<%s%s>' % (tag, self.strattrs(attrs)), escape=0) - - # match namespaces - if tag.find(':') <> -1: - prefix, suffix = tag.split(':', 1) - else: - prefix, suffix = '', tag - prefix = self.namespacemap.get(prefix, prefix) - if prefix: - prefix = prefix + '_' - - # special hack for better tracking of empty textinput/image elements in illformed feeds - if (not 
prefix) and tag not in ('title', 'link', 'description', 'name'):
-            self.intextinput = 0
-        if (not prefix) and tag not in ('title', 'link', 'description', 'url', 'href', 'width', 'height'):
-            self.inimage = 0
-
-        # call special handler (if defined) or default handler
-        methodname = '_start_' + prefix + suffix
-        try:
-            method = getattr(self, methodname)
-            return method(attrsD)
-        except AttributeError:
-            # Since there's no handler or something has gone wrong we explicitly add the element and its attributes
-            unknown_tag = prefix + suffix
-            if len(attrsD) == 0:
-                # No attributes so merge it into the enclosing dictionary
-                return self.push(unknown_tag, 1)
-            else:
-                # Has attributes so create it in its own dictionary
-                context = self._getContext()
-                context[unknown_tag] = attrsD
-
-    def unknown_endtag(self, tag):
-        if _debug: sys.stderr.write('end %s\n' % tag)
-        # match namespaces
-        if tag.find(':') <> -1:
-            prefix, suffix = tag.split(':', 1)
-        else:
-            prefix, suffix = '', tag
-        prefix = self.namespacemap.get(prefix, prefix)
-        if prefix:
-            prefix = prefix + '_'
-        if suffix == 'svg' and self.svgOK: self.svgOK -= 1
-
-        # call special handler (if defined) or default handler
-        methodname = '_end_' + prefix + suffix
-        try:
-            if self.svgOK: raise AttributeError()
-            method = getattr(self, methodname)
-            method()
-        except AttributeError:
-            self.pop(prefix + suffix)
-
-        # track inline content
-        if self.incontent and self.contentparams.has_key('type') and not self.contentparams.get('type', 'xml').endswith('xml'):
-            # element declared itself as escaped markup, but it isn't really
-            if tag in ['xhtml:div', 'div']: return # typepad does this 10/2007
-            self.contentparams['type'] = 'application/xhtml+xml'
-        if self.incontent and self.contentparams.get('type') == 'application/xhtml+xml':
-            tag = tag.split(':')[-1]
-            self.handle_data('</%s>' % tag, escape=0)
-
-        # track xml:base and xml:lang going out of scope
-        if self.basestack:
-            self.basestack.pop()
-            if self.basestack and self.basestack[-1]:
-                self.baseuri = self.basestack[-1]
-        if self.langstack:
-            self.langstack.pop()
-            if self.langstack: # and (self.langstack[-1] is not None):
-                self.lang = self.langstack[-1]
-
-    def handle_charref(self, ref):
-        # called for each character reference, e.g. for '&#160;', ref will be '160'
-        if not self.elementstack: return
-        ref = ref.lower()
-        if ref in ('34', '38', '39', '60', '62', 'x22', 'x26', 'x27', 'x3c', 'x3e'):
-            text = '&#%s;' % ref
-        else:
-            if ref[0] == 'x':
-                c = int(ref[1:], 16)
-            else:
-                c = int(ref)
-            text = unichr(c).encode('utf-8')
-        self.elementstack[-1][2].append(text)
-
-    def handle_entityref(self, ref):
-        # called for each entity reference, e.g. for '&copy;', ref will be 'copy'
-        if not self.elementstack: return
-        if _debug: sys.stderr.write('entering handle_entityref with %s\n' % ref)
-        if ref in ('lt', 'gt', 'quot', 'amp', 'apos'):
-            text = '&%s;' % ref
-        elif ref in self.entities.keys():
-            text = self.entities[ref]
-            if text.startswith('&#') and text.endswith(';'):
-                return self.handle_entityref(text)
-        else:
-            try: name2codepoint[ref]
-            except KeyError: text = '&%s;' % ref
-            else: text = unichr(name2codepoint[ref]).encode('utf-8')
-        self.elementstack[-1][2].append(text)
-
-    def handle_data(self, text, escape=1):
-        # called for each block of plain text, i.e. outside of any tag and
-        # not containing any character or entity references
-        if not self.elementstack: return
-        if escape and self.contentparams.get('type') == 'application/xhtml+xml':
-            text = _xmlescape(text)
-        self.elementstack[-1][2].append(text)
-
-    def handle_comment(self, text):
-        # called for each comment, e.g. <!-- insert message here -->
-        pass
-
-    def handle_pi(self, text):
-        # called for each processing instruction, e.g. <?instruction>
-        pass
-
-    def handle_decl(self, text):
-        pass
-
-    def parse_declaration(self, i):
-        # override internal declaration handler to handle CDATA blocks
-        if _debug: sys.stderr.write('entering parse_declaration\n')
-        if self.rawdata[i:i+9] == '<![CDATA[':
-            k = self.rawdata.find(']]>', i)
-            if k == -1:
-                # CDATA block began but didn't finish
-                k = len(self.rawdata)
-                return k
-            self.handle_data(_xmlescape(self.rawdata[i+9:k]), 0)
-            return k+3
-        else:
-            k = self.rawdata.find('>', i)
-            if k >= 0:
-                return k+1
-            else:
-                # We have an incomplete CDATA block.
-                return k
-
-    def mapContentType(self, contentType):
-        contentType = contentType.lower()
-        if contentType == 'text':
-            contentType = 'text/plain'
-        elif contentType == 'html':
-            contentType = 'text/html'
-        elif contentType == 'xhtml':
-            contentType = 'application/xhtml+xml'
-        return contentType
-
-    def trackNamespace(self, prefix, uri):
-        loweruri = uri.lower()
-        if (prefix, loweruri) == (None, 'http://my.netscape.com/rdf/simple/0.9/') and not self.version:
-            self.version = 'rss090'
-        if loweruri == 'http://purl.org/rss/1.0/' and not self.version:
-            self.version = 'rss10'
-        if loweruri == 'http://www.w3.org/2005/atom' and not self.version:
-            self.version = 'atom10'
-        if loweruri.find('backend.userland.com/rss') <> -1:
-            # match any backend.userland.com namespace
-            uri = 'http://backend.userland.com/rss'
-            loweruri = uri
-        if self._matchnamespaces.has_key(loweruri):
-            self.namespacemap[prefix] = self._matchnamespaces[loweruri]
-            self.namespacesInUse[self._matchnamespaces[loweruri]] = uri
-        else:
-            self.namespacesInUse[prefix or ''] = uri
-
-    def resolveURI(self, uri):
-        return _urljoin(self.baseuri or '', uri)
-
-    def decodeEntities(self, element, data):
-        return data
-
-    def strattrs(self, attrs):
-        return ''.join([' %s="%s"' % (t[0],_xmlescape(t[1],{'"':'&quot;'})) for t in attrs])
-
-    def push(self, element, expectingText):
-        self.elementstack.append([element, expectingText, []])
-
-    def pop(self, element, stripWhitespace=1):
-        if not self.elementstack: return
-        if self.elementstack[-1][0] != element: return
-
-        element, expectingText, pieces = self.elementstack.pop()
-
-        if self.version == 'atom10' and self.contentparams.get('type','text') == 'application/xhtml+xml':
-            # remove enclosing child element, but only if it is a <div> and
-            # only if all the remaining content is nested underneath it.
-            # This means that the divs would be retained in the following:
-            #    <div>foo</div><div>bar</div>
-            while pieces and len(pieces)>1 and not pieces[-1].strip():
-                del pieces[-1]
-            while pieces and len(pieces)>1 and not pieces[0].strip():
-                del pieces[0]
-            if pieces and (pieces[0] == '<div>' or pieces[0].startswith('<div ')) and pieces[-1]=='</div>':
-                depth = 0
-                for piece in pieces[:-1]:
-                    if piece.startswith('</'):
-                        depth -= 1
-                        if depth == 0: break
-                    elif piece.startswith('<') and not piece.endswith('/>'):
-                        depth += 1
-                else:
-                    pieces = pieces[1:-1]
-
-        # Ensure each piece is a str for Python 3
-        for (i, v) in enumerate(pieces):
-            if not isinstance(v, basestring):
-                pieces[i] = v.decode('utf-8')
-
-        output = ''.join(pieces)
-        if stripWhitespace:
-            output = output.strip()
-        if not expectingText: return output
-
-        # decode base64 content
-        if base64 and self.contentparams.get('base64', 0):
-            try:
-                output = _base64decode(output)
-            except binascii.Error:
-                pass
-            except binascii.Incomplete:
-                pass
-            except TypeError:
-                # In Python 3, base64 takes and outputs bytes, not str
-                # This may not be the most correct way to accomplish this
-                output = _base64decode(output.encode('utf-8')).decode('utf-8')
-
-        # resolve relative URIs
-        if (element in self.can_be_relative_uri) and output:
-            output = self.resolveURI(output)
-
-        # decode entities within embedded markup
-        if not self.contentparams.get('base64', 0):
-            output = self.decodeEntities(element, output)
-
-        if self.lookslikehtml(output):
-            self.contentparams['type']='text/html'
-
-        # remove temporary cruft from contentparams
-        try:
-            del self.contentparams['mode']
-        except KeyError:
-            pass
-        try:
-            del self.contentparams['base64']
-        except KeyError:
-            pass
-
-        is_htmlish = self.mapContentType(self.contentparams.get('type', 'text/html')) in self.html_types
-        # resolve relative URIs within embedded markup
-        if is_htmlish and RESOLVE_RELATIVE_URIS:
-            if element in self.can_contain_relative_uris:
-                output = _resolveRelativeURIs(output, self.baseuri, self.encoding, self.contentparams.get('type', 'text/html'))
-
-        # parse microformats
-        # (must do this before sanitizing because some microformats
-        # rely on elements that we sanitize)
-        if is_htmlish and element in ['content', 'description', 'summary']:
-            mfresults = _parseMicroformats(output, self.baseuri, self.encoding)
-            if mfresults:
-                for tag in mfresults.get('tags', []):
-                    self._addTag(tag['term'], tag['scheme'], tag['label'])
-                for enclosure in mfresults.get('enclosures', []):
-                    self._start_enclosure(enclosure)
-                for xfn in mfresults.get('xfn', []):
-                    self._addXFN(xfn['relationships'], xfn['href'], xfn['name'])
-                vcard = mfresults.get('vcard')
-                if vcard:
-                    self._getContext()['vcard'] = vcard
-
-        # sanitize embedded markup
-        if is_htmlish and SANITIZE_HTML:
-            if element in self.can_contain_dangerous_markup:
-                output = _sanitizeHTML(output, self.encoding, self.contentparams.get('type', 'text/html'))
-
-        if self.encoding and type(output) != type(u''):
-            try:
-                output = unicode(output, self.encoding)
-            except:
-                pass
-
-        # address common error where people take data that is already
-        # utf-8, presume that it is iso-8859-1, and re-encode it.
-        if self.encoding in ('utf-8', 'utf-8_INVALID_PYTHON_3') and type(output) == type(u''):
-            try:
-                output = unicode(output.encode('iso-8859-1'), 'utf-8')
-            except:
-                pass
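The block above repairs a specific mojibake pattern: UTF-8 bytes that were wrongly decoded as ISO-8859-1 and re-encoded. A minimal sketch of the same round trip with illustrative values (Python 2 semantics, as in the rest of this file):

    raw = 'caf\xc3\xa9'                   # UTF-8 bytes for u'café'
    mangled = unicode(raw, 'iso-8859-1')  # wrong charset guess produces mojibake
    fixed = unicode(mangled.encode('iso-8859-1'), 'utf-8')
    assert fixed == u'caf\xe9'            # u'café' recovered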
-
-        # map win-1252 extensions to the proper code points
-        if type(output) == type(u''):
-            output = u''.join([c in _cp1252.keys() and _cp1252[c] or c for c in output])
-
-        # categories/tags/keywords/whatever are handled in _end_category
-        if element == 'category':
-            return output
-
-        if element == 'title' and self.hasTitle:
-            return output
-
-        # store output in appropriate place(s)
-        if self.inentry and not self.insource:
-            if element == 'content':
-                self.entries[-1].setdefault(element, [])
-                contentparams = copy.deepcopy(self.contentparams)
-                contentparams['value'] = output
-                self.entries[-1][element].append(contentparams)
-            elif element == 'link':
-                if not self.inimage:
-                    # query variables in urls in link elements are improperly
-                    # converted from `?a=1&b=2` to `?a=1&b;=2` as if they're
-                    # unhandled character references. fix this special case.
-                    output = re.sub("&([A-Za-z0-9_]+);", "&\g<1>", output)
-                    self.entries[-1][element] = output
-                    if output:
-                        self.entries[-1]['links'][-1]['href'] = output
-            else:
-                if element == 'description':
-                    element = 'summary'
-                self.entries[-1][element] = output
-                if self.incontent:
-                    contentparams = copy.deepcopy(self.contentparams)
-                    contentparams['value'] = output
-                    self.entries[-1][element + '_detail'] = contentparams
-        elif (self.infeed or self.insource):# and (not self.intextinput) and (not self.inimage):
-            context = self._getContext()
-            if element == 'description':
-                element = 'subtitle'
-            context[element] = output
-            if element == 'link':
-                # fix query variables; see above for the explanation
-                output = re.sub("&([A-Za-z0-9_]+);", "&\g<1>", output)
-                context[element] = output
-                context['links'][-1]['href'] = output
-            elif self.incontent:
-                contentparams = copy.deepcopy(self.contentparams)
-                contentparams['value'] = output
-                context[element + '_detail'] = contentparams
-        return output
-
-    def pushContent(self, tag, attrsD, defaultContentType, expectingText):
-        self.incontent += 1
-        if self.lang: self.lang=self.lang.replace('_','-')
-        self.contentparams = FeedParserDict({
-            'type': self.mapContentType(attrsD.get('type', defaultContentType)),
-            'language': self.lang,
-            'base': self.baseuri})
-        self.contentparams['base64'] = self._isBase64(attrsD, self.contentparams)
-        self.push(tag, expectingText)
-
-    def popContent(self, tag):
-        value = self.pop(tag)
-        self.incontent -= 1
-        self.contentparams.clear()
-        return value
-
-    # a number of elements in a number of RSS variants are nominally plain
-    # text, but this is routinely ignored. This is an attempt to detect
-    # the most common cases. As false positives often result in silent
-    # data loss, this function errs on the conservative side.
-    def lookslikehtml(self, s):
-        if self.version.startswith('atom'): return
-        if self.contentparams.get('type','text/html') != 'text/plain': return
-
-        # must have a close tag or an entity reference to qualify
-        if not (re.search(r'</(\w+)>',s) or re.search("&#?\w+;",s)): return
-
-        # all tags must be in a restricted subset of valid HTML tags
-        if filter(lambda t: t.lower() not in _HTMLSanitizer.acceptable_elements,
-            re.findall(r'</?(\w+)',s)): return
-
-        # all entities must have been defined as valid HTML entities
-        from htmlentitydefs import entitydefs
-        if filter(lambda e: e not in entitydefs.keys(),
-            re.findall(r'&(\w+);',s)): return
-
-        return 1
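A rough feel for what lookslikehtml's first gate accepts, exercising only the close-tag and entity regexes from the method above (illustrative strings, not from the test suite):

    import re
    for s in ['<p>foo</p>', 'AT&T announced', '5 < 6 > 3']:
        print '%r qualifies: %s' % (s, bool(
            re.search(r'</(\w+)>', s) or re.search(r'&#?\w+;', s)))
    # only '<p>foo</p>' passes; the others have no close tag and no entity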
-
-    def _mapToStandardPrefix(self, name):
-        colonpos = name.find(':')
-        if colonpos <> -1:
-            prefix = name[:colonpos]
-            suffix = name[colonpos+1:]
-            prefix = self.namespacemap.get(prefix, prefix)
-            name = prefix + ':' + suffix
-        return name
-
-    def _getAttribute(self, attrsD, name):
-        return attrsD.get(self._mapToStandardPrefix(name))
-
-    def _isBase64(self, attrsD, contentparams):
-        if attrsD.get('mode', '') == 'base64':
-            return 1
-        if self.contentparams['type'].startswith('text/'):
-            return 0
-        if self.contentparams['type'].endswith('+xml'):
-            return 0
-        if self.contentparams['type'].endswith('/xml'):
-            return 0
-        return 1
-
-    def _itsAnHrefDamnIt(self, attrsD):
-        href = attrsD.get('url', attrsD.get('uri', attrsD.get('href', None)))
-        if href:
-            try:
-                del attrsD['url']
-            except KeyError:
-                pass
-            try:
-                del attrsD['uri']
-            except KeyError:
-                pass
-            attrsD['href'] = href
-        return attrsD
-
-    def _save(self, key, value, overwrite=False):
-        context = self._getContext()
-        if overwrite:
-            context[key] = value
-        else:
-            context.setdefault(key, value)
-
-    def _start_rss(self, attrsD):
-        versionmap = {'0.91': 'rss091u',
-                      '0.92': 'rss092',
-                      '0.93': 'rss093',
-                      '0.94': 'rss094'}
-        #If we're here then this is an RSS feed.
-        #If we don't have a version or have a version that starts with something
-        #other than RSS then there's been a mistake. Correct it.
-        if not self.version or not self.version.startswith('rss'):
-            attr_version = attrsD.get('version', '')
-            version = versionmap.get(attr_version)
-            if version:
-                self.version = version
-            elif attr_version.startswith('2.'):
-                self.version = 'rss20'
-            else:
-                self.version = 'rss'
-
-    def _start_dlhottitles(self, attrsD):
-        self.version = 'hotrss'
-
-    def _start_channel(self, attrsD):
-        self.infeed = 1
-        self._cdf_common(attrsD)
-    _start_feedinfo = _start_channel
-
-    def _cdf_common(self, attrsD):
-        if attrsD.has_key('lastmod'):
-            self._start_modified({})
-            self.elementstack[-1][-1] = attrsD['lastmod']
-            self._end_modified()
-        if attrsD.has_key('href'):
-            self._start_link({})
-            self.elementstack[-1][-1] = attrsD['href']
-            self._end_link()
-
-    def _start_feed(self, attrsD):
-        self.infeed = 1
-        versionmap = {'0.1': 'atom01',
-                      '0.2': 'atom02',
-                      '0.3': 'atom03'}
-        if not self.version:
-            attr_version = attrsD.get('version')
-            version = versionmap.get(attr_version)
-            if version:
-                self.version = version
-            else:
-                self.version = 'atom'
-
-    def _end_channel(self):
-        self.infeed = 0
-    _end_feed = _end_channel
-
-    def _start_image(self, attrsD):
-        context = self._getContext()
-        if not self.inentry:
-            context.setdefault('image', FeedParserDict())
-        self.inimage = 1
-        self.hasTitle = 0
-        self.push('image', 0)
-
-    def _end_image(self):
-        self.pop('image')
-        self.inimage = 0
-
-    def _start_textinput(self, attrsD):
-        context = self._getContext()
-        context.setdefault('textinput', FeedParserDict())
-        self.intextinput = 1
-        self.hasTitle = 0
-        self.push('textinput', 0)
-    _start_textInput = _start_textinput
-
-    def _end_textinput(self):
-        self.pop('textinput')
-        self.intextinput = 0
-    _end_textInput = _end_textinput
-
-    def _start_author(self, attrsD):
-        self.inauthor = 1
-        self.push('author', 1)
-        # Append a new FeedParserDict when expecting an author
-        context = self._getContext()
-        context.setdefault('authors', [])
-        context['authors'].append(FeedParserDict())
-    _start_managingeditor = _start_author
-    _start_dc_author = _start_author
-    _start_dc_creator = _start_author
-    _start_itunes_author = _start_author
-
-    def _end_author(self):
-        self.pop('author')
-        self.inauthor = 0
-        self._sync_author_detail()
-    _end_managingeditor = _end_author
-    _end_dc_author = _end_author
-    _end_dc_creator = _end_author
-    _end_itunes_author = _end_author
-
-    def _start_itunes_owner(self, attrsD):
-        self.inpublisher = 1
-        self.push('publisher', 0)
-
-    def _end_itunes_owner(self):
-        self.pop('publisher')
-        self.inpublisher = 0
-        self._sync_author_detail('publisher')
-
-    def _start_contributor(self, attrsD):
-        self.incontributor = 1
-        context = self._getContext()
-        context.setdefault('contributors', [])
-        context['contributors'].append(FeedParserDict())
-        self.push('contributor', 0)
-
-    def _end_contributor(self):
-        self.pop('contributor')
-        self.incontributor = 0
-
-    def _start_dc_contributor(self, attrsD):
-        self.incontributor = 1
-        context = self._getContext()
-        context.setdefault('contributors', [])
-        context['contributors'].append(FeedParserDict())
-        self.push('name', 0)
-
-    def _end_dc_contributor(self):
-        self._end_name()
-        self.incontributor = 0
-
-    def _start_name(self, attrsD):
-        self.push('name', 0)
-    _start_itunes_name = _start_name
-
-    def _end_name(self):
-        value = self.pop('name')
-        if self.inpublisher:
-            self._save_author('name', value, 'publisher')
-        elif self.inauthor:
-            self._save_author('name', value)
-        elif self.incontributor:
-            self._save_contributor('name', value)
-        elif self.intextinput:
-            context = self._getContext()
-            context['name'] = value
-    _end_itunes_name = _end_name
-
-    def _start_width(self, attrsD):
-        self.push('width', 0)
-
-    def _end_width(self):
-        value = self.pop('width')
-        try:
-            value = int(value)
-        except:
-            value = 0
-        if self.inimage:
-            context = self._getContext()
-            context['width'] = value
-
-    def _start_height(self, attrsD):
-        self.push('height', 0)
-
-    def _end_height(self):
-        value = self.pop('height')
-        try:
-            value = int(value)
-        except:
-            value = 0
-        if self.inimage:
-            context = self._getContext()
-            context['height'] = value
-
-    def _start_url(self, attrsD):
-        self.push('href', 1)
-    _start_homepage = _start_url
-    _start_uri = _start_url
-
-    def _end_url(self):
-        value = self.pop('href')
-        if self.inauthor:
-            self._save_author('href', value)
-        elif self.incontributor:
-            self._save_contributor('href', value)
-    _end_homepage = _end_url
-    _end_uri = _end_url
-
-    def _start_email(self, attrsD):
-        self.push('email', 0)
-    _start_itunes_email = _start_email
-
-    def _end_email(self):
-        value = self.pop('email')
-        if self.inpublisher:
-            self._save_author('email', value, 'publisher')
-        elif self.inauthor:
-            self._save_author('email', value)
-        elif self.incontributor:
-            self._save_contributor('email', value)
-    _end_itunes_email = _end_email
-
-    def _getContext(self):
-        if self.insource:
-            context = self.sourcedata
-        elif self.inimage and self.feeddata.has_key('image'):
-            context = self.feeddata['image']
-        elif self.intextinput:
-            context = self.feeddata['textinput']
-        elif self.inentry:
-            context = self.entries[-1]
-        else:
-            context = self.feeddata
-        return context
-
-    def _save_author(self, key, value, prefix='author'):
-        context = self._getContext()
-        context.setdefault(prefix + '_detail', FeedParserDict())
-        context[prefix + '_detail'][key] = value
-        self._sync_author_detail()
-        context.setdefault('authors', [FeedParserDict()])
-        context['authors'][-1][key] = value
-
-    def _save_contributor(self, key, value):
-        context = self._getContext()
-        context.setdefault('contributors', [FeedParserDict()])
-        context['contributors'][-1][key] = value
-
-    def _sync_author_detail(self, key='author'):
-        context = self._getContext()
-        detail = context.get('%s_detail' % key)
-        if detail:
-            name = detail.get('name')
-            email = detail.get('email')
-            if name and email:
-                context[key] = '%s (%s)' % (name, email)
-            elif name:
-                context[key] = name
-            elif email:
-                context[key] = email
-        else:
-            author, email = context.get(key), None
-            if not author: return
-            emailmatch = re.search(r'''(([a-zA-Z0-9\_\-\.\+]+)@((\[[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.)|(([a-zA-Z0-9\-]+\.)+))([a-zA-Z]{2,4}|[0-9]{1,3})(\]?))(\?subject=\S+)?''', author)
-            if emailmatch:
-                email = emailmatch.group(0)
-                # probably a better way to do the following, but it passes all the tests
-                author = author.replace(email, '')
-                author = author.replace('()', '')
-                author = author.replace('<>', '')
-                author = author.replace('&lt;&gt;', '')
-                author = author.strip()
-                if author and (author[0] == '('):
-                    author = author[1:]
-                if author and (author[-1] == ')'):
-                    author = author[:-1]
-                author = author.strip()
-            if author or email:
-                context.setdefault('%s_detail' % key, FeedParserDict())
-            if author:
-                context['%s_detail' % key]['name'] = author
-            if email:
-                context['%s_detail' % key]['email'] = email
-
-    def _start_subtitle(self, attrsD):
-        self.pushContent('subtitle', attrsD, 'text/plain', 1)
-    _start_tagline = _start_subtitle
-    _start_itunes_subtitle = _start_subtitle
-
-    def _end_subtitle(self):
-        self.popContent('subtitle')
-    _end_tagline = _end_subtitle
-    _end_itunes_subtitle = _end_subtitle
-
-    def _start_rights(self, attrsD):
-        self.pushContent('rights', attrsD, 'text/plain', 1)
-    _start_dc_rights = _start_rights
-    _start_copyright = _start_rights
-
-    def _end_rights(self):
-        self.popContent('rights')
-    _end_dc_rights = _end_rights
-    _end_copyright = _end_rights
-
-    def _start_item(self, attrsD):
-        self.entries.append(FeedParserDict())
-        self.push('item', 0)
-        self.inentry = 1
-        self.guidislink = 0
-        self.hasTitle = 0
-        id = self._getAttribute(attrsD, 'rdf:about')
-        if id:
-            context = self._getContext()
-            context['id'] = id
-        self._cdf_common(attrsD)
-    _start_entry = _start_item
-    _start_product = _start_item
-
-    def _end_item(self):
-        self.pop('item')
-        self.inentry = 0
-    _end_entry = _end_item
-
-    def _start_dc_language(self, attrsD):
-        self.push('language', 1)
-    _start_language = _start_dc_language
-
-    def _end_dc_language(self):
-        self.lang = self.pop('language')
-    _end_language = _end_dc_language
-
-    def _start_dc_publisher(self, attrsD):
-        self.push('publisher', 1)
-    _start_webmaster = _start_dc_publisher
-
-    def _end_dc_publisher(self):
-        self.pop('publisher')
-        self._sync_author_detail('publisher')
-    _end_webmaster = _end_dc_publisher
-
-    def _start_published(self, attrsD):
-        self.push('published', 1)
-    _start_dcterms_issued = _start_published
-    _start_issued = _start_published
-
-    def _end_published(self):
-        value = self.pop('published')
-        self._save('published_parsed', _parse_date(value), overwrite=True)
-    _end_dcterms_issued = _end_published
-    _end_issued = _end_published
-
-    def _start_updated(self, attrsD):
-        self.push('updated', 1)
-    _start_modified = _start_updated
-    _start_dcterms_modified = _start_updated
-    _start_pubdate = _start_updated
-    _start_dc_date = _start_updated
-    _start_lastbuilddate = _start_updated
-
-    def _end_updated(self):
-        value = self.pop('updated')
-        parsed_value = _parse_date(value)
-        self._save('updated_parsed', parsed_value, overwrite=True)
-    _end_modified = _end_updated
-    _end_dcterms_modified = _end_updated
-    _end_pubdate = _end_updated
-    _end_dc_date = _end_updated
-    _end_lastbuilddate = _end_updated
-
-    def _start_created(self, attrsD):
-        self.push('created', 1)
-    _start_dcterms_created = _start_created
-
-    def _end_created(self):
-        value = self.pop('created')
-        self._save('created_parsed', _parse_date(value), overwrite=True)
-    _end_dcterms_created = _end_created
-
-    def _start_expirationdate(self, attrsD):
-        self.push('expired', 1)
-
-    def _end_expirationdate(self):
-        self._save('expired_parsed', _parse_date(self.pop('expired')), overwrite=True)
-
-    def _start_cc_license(self, attrsD):
-        context = self._getContext()
-        value = self._getAttribute(attrsD, 'rdf:resource')
-        attrsD = FeedParserDict()
-        attrsD['rel']='license'
-        if value: attrsD['href']=value
-        context.setdefault('links', []).append(attrsD)
-
-    def _start_creativecommons_license(self, attrsD):
-        self.push('license', 1)
-    _start_creativeCommons_license = _start_creativecommons_license
-
-    def _end_creativecommons_license(self):
-        value = self.pop('license')
-        context = self._getContext()
-        attrsD = FeedParserDict()
-        attrsD['rel']='license'
-        if value: attrsD['href']=value
-        context.setdefault('links', []).append(attrsD)
-        del context['license']
-    _end_creativeCommons_license = _end_creativecommons_license
-
-    def _addXFN(self, relationships, href, name):
-        context = self._getContext()
-        xfn = context.setdefault('xfn', [])
-        value = FeedParserDict({'relationships': relationships, 'href': href, 'name': name})
-        if value not in xfn:
-            xfn.append(value)
-
-    def _addTag(self, term, scheme, label):
-        context = self._getContext()
-        tags = context.setdefault('tags', [])
-        if (not term) and (not scheme) and (not label): return
-        value = FeedParserDict({'term': term, 'scheme': scheme, 'label': label})
-        if value not in tags:
-            tags.append(value)
-
-    def _start_category(self, attrsD):
-        if _debug: sys.stderr.write('entering _start_category with %s\n' % repr(attrsD))
-        term = attrsD.get('term')
-        scheme = attrsD.get('scheme', attrsD.get('domain'))
-        label = attrsD.get('label')
-        self._addTag(term, scheme, label)
-        self.push('category', 1)
-    _start_dc_subject = _start_category
-    _start_keywords = _start_category
-
-    def _start_media_category(self, attrsD):
-        attrsD.setdefault('scheme', 'http://search.yahoo.com/mrss/category_schema')
-        self._start_category(attrsD)
-
-    def _end_itunes_keywords(self):
-        for term in self.pop('itunes_keywords').split():
-            self._addTag(term, 'http://www.itunes.com/', None)
-
-    def _start_itunes_category(self, attrsD):
-        self._addTag(attrsD.get('text'), 'http://www.itunes.com/', None)
-        self.push('category', 1)
-
-    def _end_category(self):
-        value = self.pop('category')
-        if not value: return
-        context = self._getContext()
-        tags = context['tags']
-        if value and len(tags) and not tags[-1]['term']:
-            tags[-1]['term'] = value
-        else:
-            self._addTag(value, None, None)
-    _end_dc_subject = _end_category
-    _end_keywords = _end_category
-    _end_itunes_category = _end_category
-    _end_media_category = _end_category
-
-    def _start_cloud(self, attrsD):
-        self._getContext()['cloud'] = FeedParserDict(attrsD)
-
-    def _start_link(self, attrsD):
-        attrsD.setdefault('rel', 'alternate')
-        if attrsD['rel'] == 'self':
-            attrsD.setdefault('type', 'application/atom+xml')
-        else:
-            attrsD.setdefault('type', 'text/html')
-        context = self._getContext()
-        attrsD = self._itsAnHrefDamnIt(attrsD)
-        if attrsD.has_key('href'):
-            attrsD['href'] = self.resolveURI(attrsD['href'])
-        expectingText = self.infeed or self.inentry or self.insource
-        context.setdefault('links', [])
-        if not (self.inentry and self.inimage):
-            context['links'].append(FeedParserDict(attrsD))
-        if attrsD.has_key('href'):
-            expectingText = 0
-            if (attrsD.get('rel') == 'alternate') and (self.mapContentType(attrsD.get('type')) in self.html_types):
-                context['link'] = attrsD['href']
-        else:
-            self.push('link', expectingText)
-    _start_producturl = _start_link
-
-    def _end_link(self):
-        value = self.pop('link')
-        context = self._getContext()
-    _end_producturl = _end_link
-
-    def _start_guid(self, attrsD):
-        self.guidislink = (attrsD.get('ispermalink', 'true') == 'true')
-        self.push('id', 1)
-
-    def _end_guid(self):
-        value = self.pop('id')
-        self._save('guidislink', self.guidislink and not self._getContext().has_key('link'))
-        if self.guidislink:
-            # guid acts as link, but only if 'ispermalink' is not present or is 'true',
-            # and only if the item doesn't already have a link element
-            self._save('link', value)
-
-    def _start_title(self, attrsD):
-        if self.svgOK: return self.unknown_starttag('title', attrsD.items())
-        self.pushContent('title', attrsD, 'text/plain', self.infeed or self.inentry or self.insource)
-    _start_dc_title = _start_title
-    _start_media_title = _start_title
-
-    def _end_title(self):
-        if self.svgOK: return
-        value = self.popContent('title')
-        if not value: return
-        context = self._getContext()
-        self.hasTitle = 1
-    _end_dc_title = _end_title
-
-    def _end_media_title(self):
-        hasTitle = self.hasTitle
-        self._end_title()
-        self.hasTitle = hasTitle
-
-    def _start_description(self, attrsD):
-        context = self._getContext()
-        if context.has_key('summary'):
-            self._summaryKey = 'content'
-            self._start_content(attrsD)
-        else:
-            self.pushContent('description', attrsD, 'text/html', self.infeed or self.inentry or self.insource)
-    _start_dc_description = _start_description
-
-    def _start_abstract(self, attrsD):
-        self.pushContent('description', attrsD, 'text/plain', self.infeed or self.inentry or self.insource)
-
-    def _end_description(self):
-        if self._summaryKey == 'content':
-            self._end_content()
-        else:
-            value = self.popContent('description')
-        self._summaryKey = None
-    _end_abstract = _end_description
-    _end_dc_description = _end_description
-
-    def _start_info(self, attrsD):
-        self.pushContent('info', attrsD, 'text/plain', 1)
-    _start_feedburner_browserfriendly = _start_info
-
-    def _end_info(self):
-        self.popContent('info')
-    _end_feedburner_browserfriendly = _end_info
-
-    def _start_generator(self, attrsD):
-        if attrsD:
-            attrsD = self._itsAnHrefDamnIt(attrsD)
-            if attrsD.has_key('href'):
-                attrsD['href'] = self.resolveURI(attrsD['href'])
-        self._getContext()['generator_detail'] = FeedParserDict(attrsD)
-        self.push('generator', 1)
-
-    def _end_generator(self):
-        value = self.pop('generator')
-        context = self._getContext()
-        if context.has_key('generator_detail'):
-            context['generator_detail']['name'] = value
-
-    def _start_admin_generatoragent(self, attrsD):
-        self.push('generator', 1)
-        value = self._getAttribute(attrsD, 'rdf:resource')
-        if value:
-            self.elementstack[-1][2].append(value)
-        self.pop('generator')
-        self._getContext()['generator_detail'] = FeedParserDict({'href': value})
-
-    def _start_admin_errorreportsto(self, attrsD):
-        self.push('errorreportsto', 1)
-        value = self._getAttribute(attrsD, 'rdf:resource')
-        if value:
-            self.elementstack[-1][2].append(value)
-        self.pop('errorreportsto')
-
-    def _start_summary(self, attrsD):
-        context = self._getContext()
-        if context.has_key('summary'):
-            self._summaryKey = 'content'
-            self._start_content(attrsD)
-        else:
-            self._summaryKey = 'summary'
-            self.pushContent(self._summaryKey, attrsD, 'text/plain', 1)
-    _start_itunes_summary = _start_summary
-
-    def _end_summary(self):
-        if self._summaryKey == 'content':
-            self._end_content()
-        else:
-            self.popContent(self._summaryKey or 'summary')
-        self._summaryKey = None
-    _end_itunes_summary = _end_summary
-
-    def _start_enclosure(self, attrsD):
-        attrsD = self._itsAnHrefDamnIt(attrsD)
-        context = self._getContext()
-        attrsD['rel']='enclosure'
-        context.setdefault('links', []).append(FeedParserDict(attrsD))
-
-    def _start_source(self, attrsD):
-        if 'url' in attrsD:
-            # This means that we're processing a source element from an RSS 2.0 feed
-            self.sourcedata['href'] = attrsD[u'url']
-        self.push('source', 1)
-        self.insource = 1
-        self.hasTitle = 0
-
-    def _end_source(self):
-        self.insource = 0
-        value = self.pop('source')
-        if value:
-            self.sourcedata['title'] = value
-        self._getContext()['source'] = copy.deepcopy(self.sourcedata)
-        self.sourcedata.clear()
-
-    def _start_content(self, attrsD):
-        self.pushContent('content', attrsD, 'text/plain', 1)
-        src = attrsD.get('src')
-        if src:
-            self.contentparams['src'] = src
-        self.push('content', 1)
-
-    def _start_prodlink(self, attrsD):
-        self.pushContent('content', attrsD, 'text/html', 1)
-
-    def _start_body(self, attrsD):
-        self.pushContent('content', attrsD, 'application/xhtml+xml', 1)
-    _start_xhtml_body = _start_body
-
-    def _start_content_encoded(self, attrsD):
-        self.pushContent('content', attrsD, 'text/html', 1)
-    _start_fullitem = _start_content_encoded
-
-    def _end_content(self):
-        copyToSummary = self.mapContentType(self.contentparams.get('type')) in (['text/plain'] + self.html_types)
-        value = self.popContent('content')
-        if copyToSummary:
-            self._save('summary', value)
-
-    _end_body = _end_content
-    _end_xhtml_body = _end_content
-    _end_content_encoded = _end_content
-    _end_fullitem = _end_content
-    _end_prodlink = _end_content
-
-    def _start_itunes_image(self, attrsD):
-        self.push('itunes_image', 0)
-        if attrsD.get('href'):
-            self._getContext()['image'] = FeedParserDict({'href': attrsD.get('href')})
-    _start_itunes_link = _start_itunes_image
-
-    def _end_itunes_block(self):
-        value = self.pop('itunes_block', 0)
-        self._getContext()['itunes_block'] = (value == 'yes') and 1 or 0
-
-    def _end_itunes_explicit(self):
-        value = self.pop('itunes_explicit', 0)
-        # Convert 'yes' -> True, 'clean' to False, and any other value to None
-        # False and None both evaluate as False, so the difference can be ignored
-        # by applications that only need to know if the content is explicit.
-        self._getContext()['itunes_explicit'] = (None, False, True)[(value == 'yes' and 2) or value == 'clean' or 0]
-
-    def _start_media_content(self, attrsD):
-        context = self._getContext()
-        context.setdefault('media_content', [])
-        context['media_content'].append(attrsD)
-
-    def _start_media_thumbnail(self, attrsD):
-        context = self._getContext()
-        context.setdefault('media_thumbnail', [])
-        self.push('url', 1) # new
-        context['media_thumbnail'].append(attrsD)
-
-    def _end_media_thumbnail(self):
-        url = self.pop('url')
-        context = self._getContext()
-        if url is not None and len(url.strip()) != 0:
-            if not context['media_thumbnail'][-1].has_key('url'):
-                context['media_thumbnail'][-1]['url'] = url
-
-    def _start_media_player(self, attrsD):
-        self.push('media_player', 0)
-        self._getContext()['media_player'] = FeedParserDict(attrsD)
-
-    def _end_media_player(self):
-        value = self.pop('media_player')
-        context = self._getContext()
-        context['media_player']['content'] = value
-
-    def _start_newlocation(self, attrsD):
-        self.push('newlocation', 1)
-
-    def _end_newlocation(self):
-        url = self.pop('newlocation')
-        context = self._getContext()
-        # don't set newlocation if the context isn't right
-        if context is not self.feeddata:
-            return
-        context['newlocation'] = _makeSafeAbsoluteURI(self.baseuri, url.strip())
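The tuple-indexing in _end_itunes_explicit above compresses a three-way mapping into one expression; a sketch of how the index works out (illustrative values):

    for value in ('yes', 'clean', 'no', ''):
        print value, '->', (None, False, True)[(value == 'yes' and 2) or value == 'clean' or 0]
    # yes -> True, clean -> False, anything else -> None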
-
-if _XML_AVAILABLE:
-    class _StrictFeedParser(_FeedParserMixin, xml.sax.handler.ContentHandler):
-        def __init__(self, baseuri, baselang, encoding):
-            if _debug: sys.stderr.write('trying StrictFeedParser\n')
-            xml.sax.handler.ContentHandler.__init__(self)
-            _FeedParserMixin.__init__(self, baseuri, baselang, encoding)
-            self.bozo = 0
-            self.exc = None
-            self.decls = {}
-
-        def startPrefixMapping(self, prefix, uri):
-            self.trackNamespace(prefix, uri)
-            if uri == 'http://www.w3.org/1999/xlink':
-                self.decls['xmlns:'+prefix] = uri
-
-        def startElementNS(self, name, qname, attrs):
-            namespace, localname = name
-            lowernamespace = str(namespace or '').lower()
-            if lowernamespace.find('backend.userland.com/rss') <> -1:
-                # match any backend.userland.com namespace
-                namespace = 'http://backend.userland.com/rss'
-                lowernamespace = namespace
-            if qname and qname.find(':') > 0:
-                givenprefix = qname.split(':')[0]
-            else:
-                givenprefix = None
-            prefix = self._matchnamespaces.get(lowernamespace, givenprefix)
-            if givenprefix and (prefix is None or (prefix == '' and lowernamespace == '')) and not self.namespacesInUse.has_key(givenprefix):
-                raise UndeclaredNamespace, "'%s' is not associated with a namespace" % givenprefix
-            localname = str(localname).lower()
-
-            # qname implementation is horribly broken in Python 2.1 (it
-            # doesn't report any), and slightly broken in Python 2.2 (it
-            # doesn't report the xml: namespace). So we match up namespaces
-            # with a known list first, and then possibly override them with
-            # the qnames the SAX parser gives us (if indeed it gives us any
-            # at all).  Thanks to MatejC for helping me test this and
-            # tirelessly telling me that it didn't work yet.
-            attrsD, self.decls = self.decls, {}
-            if localname=='math' and namespace=='http://www.w3.org/1998/Math/MathML':
-                attrsD['xmlns']=namespace
-            if localname=='svg' and namespace=='http://www.w3.org/2000/svg':
-                attrsD['xmlns']=namespace
-
-            if prefix:
-                localname = prefix.lower() + ':' + localname
-            elif namespace and not qname: #Expat
-                for name,value in self.namespacesInUse.items():
-                    if name and value == namespace:
-                        localname = name + ':' + localname
-                        break
-            if _debug: sys.stderr.write('startElementNS: qname = %s, namespace = %s, givenprefix = %s, prefix = %s, attrs = %s, localname = %s\n' % (qname, namespace, givenprefix, prefix, attrs.items(), localname))
-
-            for (namespace, attrlocalname), attrvalue in attrs._attrs.items():
-                lowernamespace = (namespace or '').lower()
-                prefix = self._matchnamespaces.get(lowernamespace, '')
-                if prefix:
-                    attrlocalname = prefix + ':' + attrlocalname
-                attrsD[str(attrlocalname).lower()] = attrvalue
-            for qname in attrs.getQNames():
-                attrsD[str(qname).lower()] = attrs.getValueByQName(qname)
-            self.unknown_starttag(localname, attrsD.items())
-
-        def characters(self, text):
-            self.handle_data(text)
-
-        def endElementNS(self, name, qname):
-            namespace, localname = name
-            lowernamespace = str(namespace or '').lower()
-            if qname and qname.find(':') > 0:
-                givenprefix = qname.split(':')[0]
-            else:
-                givenprefix = ''
-            prefix = self._matchnamespaces.get(lowernamespace, givenprefix)
-            if prefix:
-                localname = prefix + ':' + localname
-            elif namespace and not qname: #Expat
-                for name,value in self.namespacesInUse.items():
-                    if name and value == namespace:
-                        localname = name + ':' + localname
-                        break
-            localname = str(localname).lower()
-            self.unknown_endtag(localname)
-
-        def error(self, exc):
-            self.bozo = 1
-            self.exc = exc
-
-        def fatalError(self, exc):
-            self.error(exc)
-            raise exc
-
-class _BaseHTMLProcessor(sgmllib.SGMLParser):
-    special = re.compile('''[<>'"]''')
-    bare_ampersand = re.compile("&(?!#\d+;|#x[0-9a-fA-F]+;|\w+;)")
-    elements_no_end_tag = [
-      'area', 'base', 'basefont', 'br', 'col', 'command', 'embed', 'frame',
-      'hr', 'img', 'input', 'isindex', 'keygen', 'link', 'meta', 'param',
-      'source', 'track', 'wbr'
-    ]
-
-    def __init__(self, encoding, _type):
-        self.encoding = encoding
-        self._type = _type
-        if _debug: sys.stderr.write('entering BaseHTMLProcessor, encoding=%s\n' % self.encoding)
-        sgmllib.SGMLParser.__init__(self)
-
-    def reset(self):
-        self.pieces = []
-        sgmllib.SGMLParser.reset(self)
-
-    def _shorttag_replace(self, match):
-        tag = match.group(1)
-        if tag in self.elements_no_end_tag:
-            return '<' + tag + ' />'
-        else:
-            return '<' + tag + '></' + tag + '>'
-
-    def parse_starttag(self,i):
-        j=sgmllib.SGMLParser.parse_starttag(self, i)
-        if self._type == 'application/xhtml+xml':
-            if j>2 and self.rawdata[j-2:j]=='/>':
-                self.unknown_endtag(self.lasttag)
-        return j
-
-    def feed(self, data):
-        data = re.compile(r'<!((?!DOCTYPE|--|\[))', re.IGNORECASE).sub(r'&lt;!\1', data)
-        #data = re.sub(r'<(\S+?)\s*?/>', self._shorttag_replace, data) # bug [ 1399464 ] Bad regexp for _shorttag_replace
-        data = re.sub(r'<([^<>\s]+?)\s*/>', self._shorttag_replace, data)
-        data = data.replace('&#39;', "'")
-        data = data.replace('&#34;', '"')
-        try:
-            bytes
-            if bytes is str:
-                raise NameError
-            self.encoding = self.encoding + '_INVALID_PYTHON_3'
-        except NameError:
-            if self.encoding and type(data) == type(u''):
-                data = data.encode(self.encoding)
-        sgmllib.SGMLParser.feed(self, data)
-        sgmllib.SGMLParser.close(self)
-
-    def normalize_attrs(self, attrs):
-        if not attrs: return attrs
-        # utility method to be called by descendants
-        attrs = dict([(k.lower(), v) for k, v in attrs]).items()
-        attrs = [(k, k in ('rel', 'type') and v.lower() or v) for k, v in attrs]
-        attrs.sort()
-        return attrs
-
-    def unknown_starttag(self, tag, attrs):
-        # called for each start tag
-        # attrs is a list of (attr, value) tuples
-        # e.g. for <pre class='screen'>, tag='pre', attrs=[('class', 'screen')]
-        if _debug: sys.stderr.write('_BaseHTMLProcessor, unknown_starttag, tag=%s\n' % tag)
-        uattrs = []
-        strattrs=''
-        if attrs:
-            for key, value in attrs:
-                value=value.replace('>','&gt;').replace('<','&lt;').replace('"','&quot;')
-                value = self.bare_ampersand.sub("&amp;", value)
-                # thanks to Kevin Marks for this breathtaking hack to deal with (valid) high-bit attribute values in UTF-8 feeds
-                if type(value) != type(u''):
-                    try:
-                        value = unicode(value, self.encoding)
-                    except:
-                        value = unicode(value, 'iso-8859-1')
-                try:
-                    # Currently, in Python 3 the key is already a str, and cannot be decoded again
-                    uattrs.append((unicode(key, self.encoding), value))
-                except TypeError:
-                    uattrs.append((key, value))
-            strattrs = u''.join([u' %s="%s"' % (key, value) for key, value in uattrs])
-            if self.encoding:
-                try:
-                    strattrs=strattrs.encode(self.encoding)
-                except:
-                    pass
-        if tag in self.elements_no_end_tag:
-            self.pieces.append('<%(tag)s%(strattrs)s />' % locals())
-        else:
-            self.pieces.append('<%(tag)s%(strattrs)s>' % locals())
-
-    def unknown_endtag(self, tag):
-        # called for each end tag, e.g. for </pre>, tag will be 'pre'
-        # Reconstruct the original end tag.
-        if tag not in self.elements_no_end_tag:
-            self.pieces.append("</%(tag)s>" % locals())
-
-    def handle_charref(self, ref):
-        # called for each character reference, e.g. for '&#160;', ref will be '160'
-        # Reconstruct the original character reference.
-        if ref.startswith('x'):
-            value = unichr(int(ref[1:],16))
-        else:
-            value = unichr(int(ref))
-
-        if value in _cp1252.keys():
-            self.pieces.append('&#%s;' % hex(ord(_cp1252[value]))[1:])
-        else:
-            self.pieces.append('&#%(ref)s;' % locals())
-
-    def handle_entityref(self, ref):
-        # called for each entity reference, e.g. for '&copy;', ref will be 'copy'
-        # Reconstruct the original entity reference.
-        if name2codepoint.has_key(ref):
-            self.pieces.append('&%(ref)s;' % locals())
-        else:
-            self.pieces.append('&amp;%(ref)s' % locals())
-
-    def handle_data(self, text):
-        # called for each block of plain text, i.e. outside of any tag and
-        # not containing any character or entity references
-        # Store the original text verbatim.
-        if _debug: sys.stderr.write('_BaseHTMLProcessor, handle_data, text=%s\n' % text)
-        self.pieces.append(text)
-
-    def handle_comment(self, text):
-        # called for each HTML comment, e.g. <!-- insert Javascript code here -->
-        # Reconstruct the original comment.
-        self.pieces.append('<!--%(text)s-->' % locals())
-
-    def handle_pi(self, text):
-        # called for each processing instruction, e.g. <?instruction>
-        # Reconstruct original processing instruction.
-        self.pieces.append('<?%(text)s>' % locals())
-
-    def handle_decl(self, text):
-        # called for the DOCTYPE, if present, e.g.
-        # <!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN"
-        #     "http://www.w3.org/TR/html4/loose.dtd">
-        # Reconstruct original DOCTYPE
-        self.pieces.append('<!%(text)s>' % locals())
-
-    _new_declname_match = re.compile(r'[a-zA-Z][-_.a-zA-Z0-9:]*\s*').match
-    def _scan_name(self, i, declstartpos):
-        rawdata = self.rawdata
-        n = len(rawdata)
-        if i == n:
-            return None, -1
-        m = self._new_declname_match(rawdata, i)
-        if m:
-            s = m.group()
-            name = s.strip()
-            if (i + len(s)) == n:
-                return None, -1  # end of buffer
-            return name.lower(), m.end()
-        else:
-            self.handle_data(rawdata)
-#            self.updatepos(declstartpos, i)
-            return None, -1
-
-    def convert_charref(self, name):
-        return '&#%s;' % name
-
-    def convert_entityref(self, name):
-        return '&%s;' % name
-
-    def output(self):
-        '''Return processed HTML as a single string'''
-        return ''.join([str(p) for p in self.pieces])
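A self-contained sketch of the shorttag rewrite performed by _BaseHTMLProcessor.feed above, assuming the same regex and a trimmed elements_no_end_tag list:

    import re
    no_end_tag = ['br', 'hr', 'img']
    def _shorttag(match):
        tag = match.group(1)
        if tag in no_end_tag:
            return '<' + tag + ' />'
        return '<' + tag + '></' + tag + '>'
    print re.sub(r'<([^<>\s]+?)\s*/>', _shorttag, '<br/><span/>')
    # -> '<br /><span></span>': void elements keep XML form, others get a real end tag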
-
-class _LooseFeedParser(_FeedParserMixin, _BaseHTMLProcessor):
-    def __init__(self, baseuri, baselang, encoding, entities):
-        sgmllib.SGMLParser.__init__(self)
-        _FeedParserMixin.__init__(self, baseuri, baselang, encoding)
-        _BaseHTMLProcessor.__init__(self, encoding, 'application/xhtml+xml')
-        self.entities=entities
-
-    def decodeEntities(self, element, data):
-        data = data.replace('&#60;', '&lt;')
-        data = data.replace('&#x3c;', '&lt;')
-        data = data.replace('&#x3C;', '&lt;')
-        data = data.replace('&#62;', '&gt;')
-        data = data.replace('&#x3e;', '&gt;')
-        data = data.replace('&#x3E;', '&gt;')
-        data = data.replace('&#38;', '&amp;')
-        data = data.replace('&#x26;', '&amp;')
-        data = data.replace('&#34;', '&quot;')
-        data = data.replace('&#x22;', '&quot;')
-        data = data.replace('&#39;', '&apos;')
-        data = data.replace('&#x27;', '&apos;')
-        if self.contentparams.has_key('type') and not self.contentparams.get('type', 'xml').endswith('xml'):
-            data = data.replace('&lt;', '<')
-            data = data.replace('&gt;', '>')
-            data = data.replace('&amp;', '&')
-            data = data.replace('&quot;', '"')
-            data = data.replace('&apos;', "'")
-        return data
-
-    def strattrs(self, attrs):
-        return ''.join([' %s="%s"' % (n,v.replace('"','&quot;')) for n,v in attrs])
-
-class _MicroformatsParser:
-    STRING = 1
-    DATE = 2
-    URI = 3
-    NODE = 4
-    EMAIL = 5
-
-    known_xfn_relationships = ['contact', 'acquaintance', 'friend', 'met', 'co-worker', 'coworker', 'colleague', 'co-resident', 'coresident', 'neighbor', 'child', 'parent', 'sibling', 'brother', 'sister', 'spouse', 'wife', 'husband', 'kin', 'relative', 'muse', 'crush', 'date', 'sweetheart', 'me']
-    known_binary_extensions = ['zip','rar','exe','gz','tar','tgz','tbz2','bz2','z','7z','dmg','img','sit','sitx','hqx','deb','rpm','bz2','jar','rar','iso','bin','msi','mp2','mp3','ogg','ogm','mp4','m4v','m4a','avi','wma','wmv']
-
-    def __init__(self, data, baseuri, encoding):
-        self.document = BeautifulSoup.BeautifulSoup(data)
-        self.baseuri = baseuri
-        self.encoding = encoding
-        if type(data) == type(u''):
-            data = data.encode(encoding)
-        self.tags = []
-        self.enclosures = []
-        self.xfn = []
-        self.vcard = None
-
-    def vcardEscape(self, s):
-        if type(s) in (type(''), type(u'')):
-            s = s.replace(',', '\\,').replace(';', '\\;').replace('\n', '\\n')
-        return s
-
-    def vcardFold(self, s):
-        s = re.sub(';+$', '', s)
-        sFolded = ''
-        iMax = 75
-        sPrefix = ''
-        while len(s) > iMax:
-            sFolded += sPrefix + s[:iMax] + '\n'
-            s = s[iMax:]
-            sPrefix = ' '
-            iMax = 74
-        sFolded += sPrefix + s
-        return sFolded
-
-    def normalize(self, s):
-        return re.sub(r'\s+', ' ', s).strip()
-
-    def unique(self, aList):
-        results = []
-        for element in aList:
-            if element not in results:
-                results.append(element)
-        return results
-
-    def toISO8601(self, dt):
-        return time.strftime('%Y-%m-%dT%H:%M:%SZ', dt)
-
-    def getPropertyValue(self, elmRoot, sProperty, iPropertyType=4, bAllowMultiple=0, bAutoEscape=0):
-        all = lambda x: 1
-        sProperty = sProperty.lower()
-        bFound = 0
-        bNormalize = 1
-        propertyMatch = {'class': re.compile(r'\b%s\b' % sProperty)}
-        if bAllowMultiple and (iPropertyType != self.NODE):
-            snapResults = []
-            containers = elmRoot(['ul', 'ol'], propertyMatch)
-            for container in containers:
-                snapResults.extend(container('li'))
-            bFound = (len(snapResults) != 0)
-        if not bFound:
-            snapResults = elmRoot(all, propertyMatch)
-            bFound = (len(snapResults) != 0)
-        if (not bFound) and (sProperty == 'value'):
-            snapResults = elmRoot('pre')
-            bFound = (len(snapResults) != 0)
-            bNormalize = not bFound
-            if not bFound:
-                snapResults = [elmRoot]
-                bFound = (len(snapResults) != 0)
-        arFilter = []
-        if sProperty == 'vcard':
-            snapFilter = elmRoot(all, propertyMatch)
-            for node in snapFilter:
-                if node.findParent(all, propertyMatch):
-                    arFilter.append(node)
-        arResults = []
-        for node in snapResults:
-            if node not in arFilter:
-                arResults.append(node)
-        bFound = (len(arResults) != 0)
-        if not bFound:
-            if bAllowMultiple: return []
-            elif iPropertyType == self.STRING: return ''
-            elif iPropertyType == self.DATE: return None
-            elif iPropertyType == self.URI: return ''
-            elif iPropertyType == self.NODE: return None
-            else: return None
-        arValues = []
-        for elmResult in arResults:
-            sValue = None
-            if iPropertyType == self.NODE:
-                if bAllowMultiple:
-                    arValues.append(elmResult)
-                    continue
-                else:
-                    return elmResult
-            sNodeName = elmResult.name.lower()
-            if (iPropertyType == self.EMAIL) and (sNodeName == 'a'):
-                sValue = (elmResult.get('href') or '').split('mailto:').pop().split('?')[0]
-                if sValue:
-                    sValue = bNormalize and self.normalize(sValue) or sValue.strip()
-            if (not sValue) and (sNodeName == 'abbr'):
-                sValue = elmResult.get('title')
-                if sValue:
-                    sValue = bNormalize and self.normalize(sValue) or sValue.strip()
-            if (not sValue) and (iPropertyType == self.URI):
-                if sNodeName == 'a': sValue = elmResult.get('href')
-                elif sNodeName == 'img': sValue = elmResult.get('src')
-                elif sNodeName == 'object': sValue = elmResult.get('data')
-                if sValue:
-                    sValue = bNormalize and self.normalize(sValue) or sValue.strip()
-            if (not sValue) and (sNodeName == 'img'):
-                sValue = elmResult.get('alt')
-                if sValue:
-                    sValue = bNormalize and self.normalize(sValue) or sValue.strip()
-            if not sValue:
-                sValue = elmResult.renderContents()
-                sValue = re.sub(r'<\S[^>]*>', '', sValue)
-                sValue = sValue.replace('\r\n', '\n')
-                sValue = sValue.replace('\r', '\n')
-                if sValue:
-                    sValue = bNormalize and self.normalize(sValue) or sValue.strip()
-            if not sValue: continue
-            if iPropertyType == self.DATE:
-                sValue = _parse_date_iso8601(sValue)
-            if bAllowMultiple:
-                arValues.append(bAutoEscape and self.vcardEscape(sValue) or sValue)
-            else:
-                return bAutoEscape and self.vcardEscape(sValue) or sValue
-        return arValues
-
-    def findVCards(self, elmRoot, bAgentParsing=0):
-        sVCards = ''
-
-        if not bAgentParsing:
-            arCards = self.getPropertyValue(elmRoot, 'vcard', bAllowMultiple=1)
-        else:
-            arCards = [elmRoot]
-
-        for elmCard in arCards:
-            arLines = []
-
-            def processSingleString(sProperty):
-                sValue = self.getPropertyValue(elmCard, sProperty, self.STRING, bAutoEscape=1).decode(self.encoding)
-                if sValue:
-                    arLines.append(self.vcardFold(sProperty.upper() + ':' + sValue))
-                return sValue or u''
-
-            def processSingleURI(sProperty):
-                sValue = self.getPropertyValue(elmCard, sProperty, self.URI)
-                if sValue:
-                    sContentType = ''
-                    sEncoding = ''
-                    sValueKey = ''
-                    if sValue.startswith('data:'):
-                        sEncoding = ';ENCODING=b'
-                        sContentType = sValue.split(';')[0].split('/').pop()
-                        sValue = sValue.split(',', 1).pop()
-                    else:
-                        elmValue = self.getPropertyValue(elmCard, sProperty)
-                        if elmValue:
-                            if sProperty != 'url':
-                                sValueKey = ';VALUE=uri'
-                            sContentType = elmValue.get('type', '').strip().split('/').pop().strip()
-                    sContentType = sContentType.upper()
-                    if sContentType == 'OCTET-STREAM':
-                        sContentType = ''
-                    if sContentType:
-                        sContentType = ';TYPE=' + sContentType.upper()
-                    arLines.append(self.vcardFold(sProperty.upper() + sEncoding + sContentType + sValueKey + ':' + sValue))
-
-            def processTypeValue(sProperty, arDefaultType, arForceType=None):
-                arResults = self.getPropertyValue(elmCard, sProperty, bAllowMultiple=1)
-                for elmResult in arResults:
-                    arType = self.getPropertyValue(elmResult, 'type', self.STRING, 1, 1)
-                    if arForceType:
-                        arType = self.unique(arForceType + arType)
-                    if not arType:
-                        arType = arDefaultType
-                    sValue = self.getPropertyValue(elmResult, 'value', self.EMAIL, 0)
-                    if sValue:
-                        arLines.append(self.vcardFold(sProperty.upper() + ';TYPE=' + ','.join(arType) + ':' + sValue))
-
-            # AGENT
-            # must do this before all other properties because it is destructive
-            # (removes nested class="vcard" nodes so they don't interfere with
-            # this vcard's other properties)
-            arAgent = self.getPropertyValue(elmCard, 'agent', bAllowMultiple=1)
-            for elmAgent in arAgent:
-                if re.compile(r'\bvcard\b').search(elmAgent.get('class')):
-                    sAgentValue = self.findVCards(elmAgent, 1) + '\n'
-                    sAgentValue = sAgentValue.replace('\n', '\\n')
-                    sAgentValue = sAgentValue.replace(';', '\\;')
-                    if sAgentValue:
-                        arLines.append(self.vcardFold('AGENT:' + sAgentValue))
-                    # Completely remove the agent element from the parse tree
-                    elmAgent.extract()
-                else:
-                    sAgentValue = self.getPropertyValue(elmAgent, 'value', self.URI, bAutoEscape=1);
-                    if sAgentValue:
-                        arLines.append(self.vcardFold('AGENT;VALUE=uri:' + sAgentValue))
-
-            # FN (full name)
-            sFN = processSingleString('fn')
-
-            # N (name)
-            elmName = self.getPropertyValue(elmCard, 'n')
-            if elmName:
-                sFamilyName = self.getPropertyValue(elmName, 'family-name', self.STRING, bAutoEscape=1)
-                sGivenName = self.getPropertyValue(elmName, 'given-name', self.STRING, bAutoEscape=1)
-                arAdditionalNames = self.getPropertyValue(elmName, 'additional-name', self.STRING, 1, 1) + self.getPropertyValue(elmName, 'additional-names', self.STRING, 1, 1)
-                arHonorificPrefixes = self.getPropertyValue(elmName, 'honorific-prefix', self.STRING, 1, 1) + self.getPropertyValue(elmName, 'honorific-prefixes', self.STRING, 1, 1)
-                arHonorificSuffixes = self.getPropertyValue(elmName, 'honorific-suffix', self.STRING, 1, 1) + self.getPropertyValue(elmName, 'honorific-suffixes', self.STRING, 1, 1)
-                arLines.append(self.vcardFold('N:' + sFamilyName + ';' +
-                                         sGivenName + ';' +
-                                         ','.join(arAdditionalNames) + ';' +
-                                         ','.join(arHonorificPrefixes) + ';' +
-                                         ','.join(arHonorificSuffixes)))
-            elif sFN:
-                # implied "N" optimization
-                # http://microformats.org/wiki/hcard#Implied_.22N.22_Optimization
-                arNames = self.normalize(sFN).split()
-                if len(arNames) == 2:
-                    bFamilyNameFirst = (arNames[0].endswith(',') or
-                                        len(arNames[1]) == 1 or
-                                        ((len(arNames[1]) == 2) and (arNames[1].endswith('.'))))
-                    if bFamilyNameFirst:
-                        arLines.append(self.vcardFold('N:' + arNames[0] + ';' + arNames[1]))
-                    else:
-                        arLines.append(self.vcardFold('N:' + arNames[1] + ';' + arNames[0]))
-
-            # SORT-STRING
-            sSortString = self.getPropertyValue(elmCard, 'sort-string', self.STRING, bAutoEscape=1)
-            if sSortString:
-                arLines.append(self.vcardFold('SORT-STRING:' + sSortString))
-
-            # NICKNAME
-            arNickname = self.getPropertyValue(elmCard, 'nickname', self.STRING, 1, 1)
-            if arNickname:
-                arLines.append(self.vcardFold('NICKNAME:' + ','.join(arNickname)))
-
-            # PHOTO
-            processSingleURI('photo')
-
-            # BDAY
-            dtBday = self.getPropertyValue(elmCard, 'bday', self.DATE)
-            if dtBday:
-                arLines.append(self.vcardFold('BDAY:' + self.toISO8601(dtBday)))
-
-            # ADR (address)
-            arAdr = self.getPropertyValue(elmCard, 'adr', bAllowMultiple=1)
-            for elmAdr in arAdr:
-                arType = self.getPropertyValue(elmAdr, 'type', self.STRING, 1, 1)
-                if not arType:
-                    arType = ['intl','postal','parcel','work'] # default adr types, see RFC 2426 section 3.2.1
-                sPostOfficeBox = self.getPropertyValue(elmAdr, 'post-office-box', self.STRING, 0, 1)
-                sExtendedAddress = self.getPropertyValue(elmAdr, 'extended-address', self.STRING, 0, 1)
-                sStreetAddress = self.getPropertyValue(elmAdr, 'street-address', self.STRING, 0, 1)
-                sLocality = self.getPropertyValue(elmAdr, 'locality', self.STRING, 0, 1)
-                sRegion = self.getPropertyValue(elmAdr, 'region', self.STRING, 0, 1)
-                sPostalCode = self.getPropertyValue(elmAdr, 'postal-code', self.STRING, 0, 1)
-                sCountryName = self.getPropertyValue(elmAdr, 'country-name', self.STRING, 0, 1)
-                arLines.append(self.vcardFold('ADR;TYPE=' + ','.join(arType) + ':' +
-                                         sPostOfficeBox + ';' +
-                                         sExtendedAddress + ';' +
-                                         sStreetAddress + ';' +
-                                         sLocality + ';' +
-                                         sRegion + ';' +
-                                         sPostalCode + ';' +
-                                         sCountryName))
-
-            # LABEL
-            processTypeValue('label', ['intl','postal','parcel','work'])
-
-            # TEL (phone number)
-            processTypeValue('tel', ['voice'])
-
-            # EMAIL
-            processTypeValue('email', ['internet'], ['internet'])
-
-            # MAILER
-            processSingleString('mailer')
-
-            # TZ (timezone)
-            processSingleString('tz')
-
-            # GEO (geographical information)
-            elmGeo = self.getPropertyValue(elmCard, 'geo')
-            if elmGeo:
-                sLatitude = self.getPropertyValue(elmGeo, 'latitude', self.STRING, 0, 1)
-                sLongitude = self.getPropertyValue(elmGeo, 'longitude', self.STRING, 0, 1)
-                arLines.append(self.vcardFold('GEO:' + sLatitude + ';' + sLongitude))
-
-            # TITLE
-            processSingleString('title')
-
-            # ROLE
-            processSingleString('role')
-
-            # LOGO
-            processSingleURI('logo')
-
-            # ORG (organization)
-            elmOrg = self.getPropertyValue(elmCard, 'org')
-            if elmOrg:
-                sOrganizationName = self.getPropertyValue(elmOrg, 'organization-name', self.STRING, 0, 1)
-                if not sOrganizationName:
-                    # implied "organization-name" optimization
-                    # http://microformats.org/wiki/hcard#Implied_.22organization-name.22_Optimization
-                    sOrganizationName = self.getPropertyValue(elmCard, 'org', self.STRING, 0, 1)
-                    if sOrganizationName:
-                        arLines.append(self.vcardFold('ORG:' + sOrganizationName))
-                else:
-                    arOrganizationUnit = self.getPropertyValue(elmOrg, 'organization-unit', self.STRING, 1, 1)
-                    arLines.append(self.vcardFold('ORG:' + sOrganizationName + ';' + ';'.join(arOrganizationUnit)))
-
-            # CATEGORY
-            arCategory = self.getPropertyValue(elmCard, 'category', self.STRING, 1, 1) + self.getPropertyValue(elmCard, 'categories', self.STRING, 1, 1)
-            if arCategory:
-                arLines.append(self.vcardFold('CATEGORIES:' + ','.join(arCategory)))
-
-            # NOTE
-            processSingleString('note')
-
-            # REV
-            processSingleString('rev')
-
-            # SOUND
-            processSingleURI('sound')
-
-            # UID
-            processSingleString('uid')
-
-            # URL
-            processSingleURI('url')
-
-            # CLASS
-            processSingleString('class')
-
-            # KEY
-            processSingleURI('key')
-
-            if arLines:
-                arLines = [u'BEGIN:vCard',u'VERSION:3.0'] + arLines + [u'END:vCard']
-                sVCards += u'\n'.join(arLines) + u'\n'
-
-        return sVCards.strip()
-
-    def isProbablyDownloadable(self, elm):
-        attrsD = elm.attrMap
-        if not attrsD.has_key('href'): return 0
-        linktype = attrsD.get('type', '').strip()
-        if linktype.startswith('audio/') or \
-           linktype.startswith('video/') or \
-           (linktype.startswith('application/') and not linktype.endswith('xml')):
-            return 1
-        path = urlparse.urlparse(attrsD['href'])[2]
-        if path.find('.') == -1: return 0
-        fileext = path.split('.').pop().lower()
-        return fileext in self.known_binary_extensions
-
-    def findTags(self):
-        all = lambda x: 1
-        for elm in self.document(all, {'rel': re.compile(r'\btag\b')}):
-            href = elm.get('href')
-            if not href: continue
-            urlscheme, domain, path, params, query, fragment = \
-                       urlparse.urlparse(_urljoin(self.baseuri, href))
-            segments = path.split('/')
-            tag = segments.pop()
-            if not tag:
-                tag = segments.pop()
-            tagscheme = urlparse.urlunparse((urlscheme, domain, '/'.join(segments), '', '', ''))
-            if not tagscheme.endswith('/'):
-                tagscheme += '/'
-            self.tags.append(FeedParserDict({"term": tag, "scheme": tagscheme, "label": elm.string or ''}))
-
-    def findEnclosures(self):
-        all = lambda x: 1
-        enclosure_match = re.compile(r'\benclosure\b')
-        for elm in self.document(all, {'href': re.compile(r'.+')}):
-            if not enclosure_match.search(elm.get('rel', '')) and not self.isProbablyDownloadable(elm): continue
-            if elm.attrMap not in self.enclosures:
-                self.enclosures.append(elm.attrMap)
-                if elm.string and not elm.get('title'):
-                    self.enclosures[-1]['title'] = elm.string
-
-    def findXFN(self):
-        all = lambda x: 1
-        for elm in self.document(all, {'rel': re.compile('.+'), 'href': re.compile('.+')}):
-            rels = elm.get('rel', '').split()
-            xfn_rels = []
-            for rel in rels:
-                if rel in self.known_xfn_relationships:
-                    xfn_rels.append(rel)
-            if xfn_rels:
-                self.xfn.append({"relationships": xfn_rels, "href": elm.get('href', ''), "name": elm.string})
-
-def _parseMicroformats(htmlSource, baseURI, encoding):
-    if not BeautifulSoup: return
-    if _debug: sys.stderr.write('entering _parseMicroformats\n')
-    try:
-        p = _MicroformatsParser(htmlSource, baseURI, encoding)
-    except UnicodeEncodeError:
-        # sgmllib throws this exception when performing lookups of tags
-        # with non-ASCII characters in them.
-        return
-    p.vcard = p.findVCards(p.document)
-    p.findTags()
-    p.findEnclosures()
-    p.findXFN()
-    return {"tags": p.tags, "enclosures": p.enclosures, "xfn": p.xfn, "vcard": p.vcard}
-
-class _RelativeURIResolver(_BaseHTMLProcessor):
-    relative_uris = [('a', 'href'),
-                     ('applet', 'codebase'),
-                     ('area', 'href'),
-                     ('blockquote', 'cite'),
-                     ('body', 'background'),
-                     ('del', 'cite'),
-                     ('form', 'action'),
-                     ('frame', 'longdesc'),
-                     ('frame', 'src'),
-                     ('iframe', 'longdesc'),
-                     ('iframe', 'src'),
-                     ('head', 'profile'),
-                     ('img', 'longdesc'),
-                     ('img', 'src'),
-                     ('img', 'usemap'),
-                     ('input', 'src'),
-                     ('input', 'usemap'),
-                     ('ins', 'cite'),
-                     ('link', 'href'),
-                     ('object', 'classid'),
-                     ('object', 'codebase'),
-                     ('object', 'data'),
-                     ('object', 'usemap'),
-                     ('q', 'cite'),
-                     ('script', 'src')]
-
-    def __init__(self, baseuri, encoding, _type):
-        _BaseHTMLProcessor.__init__(self, encoding, _type)
-        self.baseuri = baseuri
-
-    def resolveURI(self, uri):
-        return _makeSafeAbsoluteURI(_urljoin(self.baseuri, uri.strip()))
-
-    def unknown_starttag(self, tag, attrs):
-        if _debug:
-            sys.stderr.write('tag: [%s] with attributes: [%s]\n' % (tag, str(attrs)))
-        attrs = self.normalize_attrs(attrs)
-        attrs = [(key, ((tag, key) in self.relative_uris) and self.resolveURI(value) or value) for key, value in attrs]
-        _BaseHTMLProcessor.unknown_starttag(self, tag, attrs)
-
-def _resolveRelativeURIs(htmlSource, baseURI, encoding, _type):
-    if _debug:
-        sys.stderr.write('entering _resolveRelativeURIs\n')
-
-    p = _RelativeURIResolver(baseURI, encoding, _type)
-    p.feed(htmlSource)
-    return p.output()
-
-def _makeSafeAbsoluteURI(base, rel=None):
-    # bail if ACCEPTABLE_URI_SCHEMES is empty
-    if not ACCEPTABLE_URI_SCHEMES:
-        return _urljoin(base, rel or u'')
-    if not base:
-        return rel or u''
-    if not rel:
-        if base.strip().split(':', 1)[0] not in ACCEPTABLE_URI_SCHEMES:
-            return u''
-        return base
-    uri = _urljoin(base, rel)
-    if uri.strip().split(':', 1)[0] not in ACCEPTABLE_URI_SCHEMES:
-        return u''
-    return uri
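Assuming the module-level ACCEPTABLE_URI_SCHEMES allows 'http' but not 'javascript', _makeSafeAbsoluteURI behaves roughly like this:

    print _makeSafeAbsoluteURI('http://example.com/feed', '/entry/1')
    # -> 'http://example.com/entry/1'
    print _makeSafeAbsoluteURI('http://example.com/feed', 'javascript:alert(1)')
    # -> u'' (URIs with disallowed schemes are dropped entirely)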
'draggable', 'dynsrc', 'enctype', 'end', 'face', 'for', - 'form', 'frame', 'galleryimg', 'gutter', 'headers', 'height', 'hidefocus', - 'hidden', 'high', 'href', 'hreflang', 'hspace', 'icon', 'id', 'inputmode', - 'ismap', 'keytype', 'label', 'leftspacing', 'lang', 'list', 'longdesc', - 'loop', 'loopcount', 'loopend', 'loopstart', 'low', 'lowsrc', 'max', - 'maxlength', 'media', 'method', 'min', 'multiple', 'name', 'nohref', - 'noshade', 'nowrap', 'open', 'optimum', 'pattern', 'ping', 'point-size', - 'prompt', 'pqg', 'radiogroup', 'readonly', 'rel', 'repeat-max', - 'repeat-min', 'replace', 'required', 'rev', 'rightspacing', 'rows', - 'rowspan', 'rules', 'scope', 'selected', 'shape', 'size', 'span', 'src', - 'start', 'step', 'summary', 'suppress', 'tabindex', 'target', 'template', - 'title', 'toppadding', 'type', 'unselectable', 'usemap', 'urn', 'valign', - 'value', 'variable', 'volume', 'vspace', 'vrml', 'width', 'wrap', - 'xml:lang'] - - unacceptable_elements_with_end_tag = ['script', 'applet', 'style'] - - acceptable_css_properties = ['azimuth', 'background-color', - 'border-bottom-color', 'border-collapse', 'border-color', - 'border-left-color', 'border-right-color', 'border-top-color', 'clear', - 'color', 'cursor', 'direction', 'display', 'elevation', 'float', 'font', - 'font-family', 'font-size', 'font-style', 'font-variant', 'font-weight', - 'height', 'letter-spacing', 'line-height', 'overflow', 'pause', - 'pause-after', 'pause-before', 'pitch', 'pitch-range', 'richness', - 'speak', 'speak-header', 'speak-numeral', 'speak-punctuation', - 'speech-rate', 'stress', 'text-align', 'text-decoration', 'text-indent', - 'unicode-bidi', 'vertical-align', 'voice-family', 'volume', - 'white-space', 'width'] - - # survey of common keywords found in feeds - acceptable_css_keywords = ['auto', 'aqua', 'black', 'block', 'blue', - 'bold', 'both', 'bottom', 'brown', 'center', 'collapse', 'dashed', - 'dotted', 'fuchsia', 'gray', 'green', '!important', 'italic', 'left', - 'lime', 'maroon', 'medium', 'none', 'navy', 'normal', 'nowrap', 'olive', - 'pointer', 'purple', 'red', 'right', 'solid', 'silver', 'teal', 'top', - 'transparent', 'underline', 'white', 'yellow'] - - valid_css_values = re.compile('^(#[0-9a-f]+|rgb\(\d+%?,\d*%?,?\d*%?\)?|' + - '\d{0,2}\.?\d{0,2}(cm|em|ex|in|mm|pc|pt|px|%|,|\))?)$') - - mathml_elements = ['annotation', 'annotation-xml', 'maction', 'math', - 'merror', 'mfenced', 'mfrac', 'mi', 'mmultiscripts', 'mn', 'mo', 'mover', 'mpadded', - 'mphantom', 'mprescripts', 'mroot', 'mrow', 'mspace', 'msqrt', 'mstyle', - 'msub', 'msubsup', 'msup', 'mtable', 'mtd', 'mtext', 'mtr', 'munder', - 'munderover', 'none', 'semantics'] - - mathml_attributes = ['actiontype', 'align', 'columnalign', 'columnalign', - 'columnalign', 'close', 'columnlines', 'columnspacing', 'columnspan', 'depth', - 'display', 'displaystyle', 'encoding', 'equalcolumns', 'equalrows', - 'fence', 'fontstyle', 'fontweight', 'frame', 'height', 'linethickness', - 'lspace', 'mathbackground', 'mathcolor', 'mathvariant', 'mathvariant', - 'maxsize', 'minsize', 'open', 'other', 'rowalign', 'rowalign', 'rowalign', - 'rowlines', 'rowspacing', 'rowspan', 'rspace', 'scriptlevel', 'selection', - 'separator', 'separators', 'stretchy', 'width', 'width', 'xlink:href', - 'xlink:show', 'xlink:type', 'xmlns', 'xmlns:xlink'] - - # svgtiny - foreignObject + linearGradient + radialGradient + stop - svg_elements = ['a', 'animate', 'animateColor', 'animateMotion', - 'animateTransform', 'circle', 'defs', 'desc', 'ellipse', 'foreignObject', - 'font-face', 
'font-face-name', 'font-face-src', 'g', 'glyph', 'hkern', - 'linearGradient', 'line', 'marker', 'metadata', 'missing-glyph', 'mpath', - 'path', 'polygon', 'polyline', 'radialGradient', 'rect', 'set', 'stop', - 'svg', 'switch', 'text', 'title', 'tspan', 'use'] - - # svgtiny + class + opacity + offset + xmlns + xmlns:xlink - svg_attributes = ['accent-height', 'accumulate', 'additive', 'alphabetic', - 'arabic-form', 'ascent', 'attributeName', 'attributeType', - 'baseProfile', 'bbox', 'begin', 'by', 'calcMode', 'cap-height', - 'class', 'color', 'color-rendering', 'content', 'cx', 'cy', 'd', 'dx', - 'dy', 'descent', 'display', 'dur', 'end', 'fill', 'fill-opacity', - 'fill-rule', 'font-family', 'font-size', 'font-stretch', 'font-style', - 'font-variant', 'font-weight', 'from', 'fx', 'fy', 'g1', 'g2', - 'glyph-name', 'gradientUnits', 'hanging', 'height', 'horiz-adv-x', - 'horiz-origin-x', 'id', 'ideographic', 'k', 'keyPoints', 'keySplines', - 'keyTimes', 'lang', 'mathematical', 'marker-end', 'marker-mid', - 'marker-start', 'markerHeight', 'markerUnits', 'markerWidth', 'max', - 'min', 'name', 'offset', 'opacity', 'orient', 'origin', - 'overline-position', 'overline-thickness', 'panose-1', 'path', - 'pathLength', 'points', 'preserveAspectRatio', 'r', 'refX', 'refY', - 'repeatCount', 'repeatDur', 'requiredExtensions', 'requiredFeatures', - 'restart', 'rotate', 'rx', 'ry', 'slope', 'stemh', 'stemv', - 'stop-color', 'stop-opacity', 'strikethrough-position', - 'strikethrough-thickness', 'stroke', 'stroke-dasharray', - 'stroke-dashoffset', 'stroke-linecap', 'stroke-linejoin', - 'stroke-miterlimit', 'stroke-opacity', 'stroke-width', 'systemLanguage', - 'target', 'text-anchor', 'to', 'transform', 'type', 'u1', 'u2', - 'underline-position', 'underline-thickness', 'unicode', 'unicode-range', - 'units-per-em', 'values', 'version', 'viewBox', 'visibility', 'width', - 'widths', 'x', 'x-height', 'x1', 'x2', 'xlink:actuate', 'xlink:arcrole', - 'xlink:href', 'xlink:role', 'xlink:show', 'xlink:title', 'xlink:type', - 'xml:base', 'xml:lang', 'xml:space', 'xmlns', 'xmlns:xlink', 'y', 'y1', - 'y2', 'zoomAndPan'] - - svg_attr_map = None - svg_elem_map = None - - acceptable_svg_properties = [ 'fill', 'fill-opacity', 'fill-rule', - 'stroke', 'stroke-width', 'stroke-linecap', 'stroke-linejoin', - 'stroke-opacity'] - - def reset(self): - _BaseHTMLProcessor.reset(self) - self.unacceptablestack = 0 - self.mathmlOK = 0 - self.svgOK = 0 - - def unknown_starttag(self, tag, attrs): - acceptable_attributes = self.acceptable_attributes - keymap = {} - if not tag in self.acceptable_elements or self.svgOK: - if tag in self.unacceptable_elements_with_end_tag: - self.unacceptablestack += 1 - - # add implicit namespaces to html5 inline svg/mathml - if self._type.endswith('html'): - if not dict(attrs).get('xmlns'): - if tag=='svg': - attrs.append( ('xmlns','http://www.w3.org/2000/svg') ) - if tag=='math': - attrs.append( ('xmlns','http://www.w3.org/1998/Math/MathML') ) - - # not otherwise acceptable, perhaps it is MathML or SVG? - if tag=='math' and ('xmlns','http://www.w3.org/1998/Math/MathML') in attrs: - self.mathmlOK += 1 - if tag=='svg' and ('xmlns','http://www.w3.org/2000/svg') in attrs: - self.svgOK += 1 - - # chose acceptable attributes based on tag class, else bail - if self.mathmlOK and tag in self.mathml_elements: - acceptable_attributes = self.mathml_attributes - elif self.svgOK and tag in self.svg_elements: - # for most vocabularies, lowercasing is a good idea. 
Many
-            # svg elements, however, are camel case
-            if not self.svg_attr_map:
-                lower=[attr.lower() for attr in self.svg_attributes]
-                mix=[a for a in self.svg_attributes if a not in lower]
-                self.svg_attributes = lower
-                self.svg_attr_map = dict([(a.lower(),a) for a in mix])
-
-                lower=[attr.lower() for attr in self.svg_elements]
-                mix=[a for a in self.svg_elements if a not in lower]
-                self.svg_elements = lower
-                self.svg_elem_map = dict([(a.lower(),a) for a in mix])
-            acceptable_attributes = self.svg_attributes
-            tag = self.svg_elem_map.get(tag,tag)
-            keymap = self.svg_attr_map
-        elif not tag in self.acceptable_elements:
-            return
-
-        # declare xlink namespace, if needed
-        if self.mathmlOK or self.svgOK:
-            if filter(lambda (n,v): n.startswith('xlink:'),attrs):
-                if not ('xmlns:xlink','http://www.w3.org/1999/xlink') in attrs:
-                    attrs.append(('xmlns:xlink','http://www.w3.org/1999/xlink'))
-
-        clean_attrs = []
-        for key, value in self.normalize_attrs(attrs):
-            if key in acceptable_attributes:
-                key=keymap.get(key,key)
-                clean_attrs.append((key,value))
-            elif key=='style':
-                clean_value = self.sanitize_style(value)
-                if clean_value: clean_attrs.append((key,clean_value))
-        _BaseHTMLProcessor.unknown_starttag(self, tag, clean_attrs)
-
-    def unknown_endtag(self, tag):
-        if not tag in self.acceptable_elements:
-            if tag in self.unacceptable_elements_with_end_tag:
-                self.unacceptablestack -= 1
-            if self.mathmlOK and tag in self.mathml_elements:
-                if tag == 'math' and self.mathmlOK: self.mathmlOK -= 1
-            elif self.svgOK and tag in self.svg_elements:
-                tag = self.svg_elem_map.get(tag,tag)
-                if tag == 'svg' and self.svgOK: self.svgOK -= 1
-            else:
-                return
-        _BaseHTMLProcessor.unknown_endtag(self, tag)
-
-    def handle_pi(self, text):
-        pass
-
-    def handle_decl(self, text):
-        pass
-
-    def handle_data(self, text):
-        if not self.unacceptablestack:
-            _BaseHTMLProcessor.handle_data(self, text)
-
-    def sanitize_style(self, style):
-        # disallow urls
-        style=re.compile('url\s*\(\s*[^\s)]+?\s*\)\s*').sub(' ',style)
-
-        # gauntlet
-        if not re.match("""^([:,;#%.\sa-zA-Z0-9!]|\w-\w|'[\s\w]+'|"[\s\w]+"|\([\d,\s]+\))*$""", style): return ''
-        # This replaced a regexp that used re.match and was prone to pathological back-tracking.
-        if re.sub("\s*[-\w]+\s*:\s*[^:;]*;?", '', style).strip(): return ''
-
-        clean = []
-        for prop,value in re.findall("([-\w]+)\s*:\s*([^:;]*)",style):
-            if not value: continue
-            if prop.lower() in self.acceptable_css_properties:
-                clean.append(prop + ': ' + value + ';')
-            elif prop.split('-')[0].lower() in ['background','border','margin','padding']:
-                for keyword in value.split():
-                    if not keyword in self.acceptable_css_keywords and \
-                      not self.valid_css_values.match(keyword):
-                        break
-                else:
-                    clean.append(prop + ': ' + value + ';')
-            elif self.svgOK and prop.lower() in self.acceptable_svg_properties:
-                clean.append(prop + ': ' + value + ';')
-
-        return ' '.join(clean)
-
-
-def _sanitizeHTML(htmlSource, encoding, _type):
-    p = _HTMLSanitizer(encoding, _type)
-    htmlSource = htmlSource.replace('<![CDATA[', '&lt;![CDATA[')
-    p.feed(htmlSource)
-    data = p.output()
-    if TIDY_MARKUP:
-        # loop through list of preferred Tidy interfaces looking for one that's installed,
-        # then set up a common _tidy function to wrap the interface-specific API.
- _tidy = None - for tidy_interface in PREFERRED_TIDY_INTERFACES: - try: - if tidy_interface == "uTidy": - from tidy import parseString as _utidy - def _tidy(data, **kwargs): - return str(_utidy(data, **kwargs)) - break - elif tidy_interface == "mxTidy": - from mx.Tidy import Tidy as _mxtidy - def _tidy(data, **kwargs): - nerrors, nwarnings, data, errordata = _mxtidy.tidy(data, **kwargs) - return data - break - except: - pass - if _tidy: - utf8 = type(data) == type(u'') - if utf8: - data = data.encode('utf-8') - data = _tidy(data, output_xhtml=1, numeric_entities=1, wrap=0, char_encoding="utf8") - if utf8: - data = unicode(data, 'utf-8') - if data.count('<body'): - data = data.split('<body', 1)[1] - if data.count('>'): - data = data.split('>', 1)[1] - if data.count('</body'): - data = data.split('</body', 1)[0] - data = data.strip().replace('\r\n', '\n') - return data - -class _FeedURLHandler(urllib2.HTTPDigestAuthHandler, urllib2.HTTPRedirectHandler, urllib2.HTTPDefaultErrorHandler): - def http_error_default(self, req, fp, code, msg, headers): - if ((code / 100) == 3) and (code != 304): - return self.http_error_302(req, fp, code, msg, headers) - infourl = urllib.addinfourl(fp, headers, req.get_full_url()) - infourl.status = code - return infourl - - def http_error_302(self, req, fp, code, msg, headers): - if headers.dict.has_key('location'): - infourl = urllib2.HTTPRedirectHandler.http_error_302(self, req, fp, code, msg, headers) - else: - infourl = urllib.addinfourl(fp, headers, req.get_full_url()) - if not hasattr(infourl, 'status'): - infourl.status = code - return infourl - - def http_error_301(self, req, fp, code, msg, headers): - if headers.dict.has_key('location'): - infourl = urllib2.HTTPRedirectHandler.http_error_301(self, req, fp, code, msg, headers) - else: - infourl = urllib.addinfourl(fp, headers, req.get_full_url()) - if not hasattr(infourl, 'status'): - infourl.status = code - return infourl - - http_error_300 = http_error_302 - http_error_303 = http_error_302 - http_error_307 = http_error_302 - - def http_error_401(self, req, fp, code, msg, headers): - # Check if - # - server requires digest auth, AND - # - we tried (unsuccessfully) with basic auth, AND - # - we're using Python 2.3.3 or later (digest auth is irreparably broken in earlier versions) - # If all conditions hold, parse authentication information - # out of the Authorization header we sent the first time - # (for the username and password) and the WWW-Authenticate - # header the server sent back (for the realm) and retry - # the request with the appropriate digest auth headers instead. - # This evil genius hack has been brought to you by Aaron Swartz. - host = urlparse.urlparse(req.get_full_url())[1] - try: - assert sys.version.split()[0] >= '2.3.3' - assert base64 is not None - user, passw = _base64decode(req.headers['Authorization'].split(' ')[1]).split(':') - realm = re.findall('realm="([^"]*)"', headers['WWW-Authenticate'])[0] - self.add_password(realm, host, user, passw) - retry = self.http_error_auth_reqed('www-authenticate', host, req, headers) - self.reset_retry_count() - return retry - except: - return self.http_error_default(req, fp, code, msg, headers) - -def _open_resource(url_file_stream_or_string, etag, modified, agent, referrer, handlers, request_headers): - """URL, filename, or string --> stream - - This function lets you define parsers that take any input source - (URL, pathname to local or network file, or actual data as a string) - and deal with it in a uniform manner. 
Returned object is guaranteed - to have all the basic stdio read methods (read, readline, readlines). - Just .close() the object when you're done with it. - - If the etag argument is supplied, it will be used as the value of an - If-None-Match request header. - - If the modified argument is supplied, it can be a tuple of 9 integers - (as returned by gmtime() in the standard Python time module) or a date - string in any format supported by feedparser. Regardless, it MUST - be in GMT (Greenwich Mean Time). It will be reformatted into an - RFC 1123-compliant date and used as the value of an If-Modified-Since - request header. - - If the agent argument is supplied, it will be used as the value of a - User-Agent request header. - - If the referrer argument is supplied, it will be used as the value of a - Referer[sic] request header. - - If handlers is supplied, it is a list of handlers used to build a - urllib2 opener. - - if request_headers is supplied it is a dictionary of HTTP request headers - that will override the values generated by FeedParser. - """ - - if hasattr(url_file_stream_or_string, 'read'): - return url_file_stream_or_string - - if url_file_stream_or_string == '-': - return sys.stdin - - if urlparse.urlparse(url_file_stream_or_string)[0] in ('http', 'https', 'ftp', 'file', 'feed'): - # Deal with the feed URI scheme - if url_file_stream_or_string.startswith('feed:http'): - url_file_stream_or_string = url_file_stream_or_string[5:] - elif url_file_stream_or_string.startswith('feed:'): - url_file_stream_or_string = 'http:' + url_file_stream_or_string[5:] - if not agent: - agent = USER_AGENT - # test for inline user:password for basic auth - auth = None - if base64: - urltype, rest = urllib.splittype(url_file_stream_or_string) - realhost, rest = urllib.splithost(rest) - if realhost: - user_passwd, realhost = urllib.splituser(realhost) - if user_passwd: - url_file_stream_or_string = '%s://%s%s' % (urltype, realhost, rest) - auth = base64.standard_b64encode(user_passwd).strip() - - # iri support - try: - if isinstance(url_file_stream_or_string,unicode): - url_file_stream_or_string = url_file_stream_or_string.encode('idna').decode('utf-8') - else: - url_file_stream_or_string = url_file_stream_or_string.decode('utf-8').encode('idna').decode('utf-8') - except: - pass - - # try to open with urllib2 (to use optional headers) - request = _build_urllib2_request(url_file_stream_or_string, agent, etag, modified, referrer, auth, request_headers) - opener = apply(urllib2.build_opener, tuple(handlers + [_FeedURLHandler()])) - opener.addheaders = [] # RMK - must clear so we only send our custom User-Agent - try: - return opener.open(request) - finally: - opener.close() # JohnD - - # try to open with native open function (if url_file_stream_or_string is a filename) - try: - return open(url_file_stream_or_string, 'rb') - except: - pass - - # treat url_file_stream_or_string as string - return _StringIO(str(url_file_stream_or_string)) - -def _build_urllib2_request(url, agent, etag, modified, referrer, auth, request_headers): - request = urllib2.Request(url) - request.add_header('User-Agent', agent) - if etag: - request.add_header('If-None-Match', etag) - if type(modified) == type(''): - modified = _parse_date(modified) - elif isinstance(modified, datetime.datetime): - modified = modified.utctimetuple() - if modified: - # format into an RFC 1123-compliant timestamp. 
We can't use - # time.strftime() since the %a and %b directives can be affected - # by the current locale, but RFC 2616 states that dates must be - # in English. - short_weekdays = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun'] - months = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec'] - request.add_header('If-Modified-Since', '%s, %02d %s %04d %02d:%02d:%02d GMT' % (short_weekdays[modified[6]], modified[2], months[modified[1] - 1], modified[0], modified[3], modified[4], modified[5])) - if referrer: - request.add_header('Referer', referrer) - if gzip and zlib: - request.add_header('Accept-encoding', 'gzip, deflate') - elif gzip: - request.add_header('Accept-encoding', 'gzip') - elif zlib: - request.add_header('Accept-encoding', 'deflate') - else: - request.add_header('Accept-encoding', '') - if auth: - request.add_header('Authorization', 'Basic %s' % auth) - if ACCEPT_HEADER: - request.add_header('Accept', ACCEPT_HEADER) - # use this for whatever -- cookies, special headers, etc - # [('Cookie','Something'),('x-special-header','Another Value')] - for header_name, header_value in request_headers.items(): - request.add_header(header_name, header_value) - request.add_header('A-IM', 'feed') # RFC 3229 support - return request - -_date_handlers = [] -def registerDateHandler(func): - '''Register a date handler function (takes string, returns 9-tuple date in GMT)''' - _date_handlers.insert(0, func) - -# ISO-8601 date parsing routines written by Fazal Majid. -# The ISO 8601 standard is very convoluted and irregular - a full ISO 8601 -# parser is beyond the scope of feedparser and would be a worthwhile addition -# to the Python library. -# A single regular expression cannot parse ISO 8601 date formats into groups -# as the standard is highly irregular (for instance is 030104 2003-01-04 or -# 0301-04-01), so we use templates instead. -# Please note the order in templates is significant because we need a -# greedy match. -_iso8601_tmpl = ['YYYY-?MM-?DD', 'YYYY-0MM?-?DD', 'YYYY-MM', 'YYYY-?OOO', - 'YY-?MM-?DD', 'YY-?OOO', 'YYYY', - '-YY-?MM', '-OOO', '-YY', - '--MM-?DD', '--MM', - '---DD', - 'CC', ''] -_iso8601_re = [ - tmpl.replace( - 'YYYY', r'(?P<year>\d{4})').replace( - 'YY', r'(?P<year>\d\d)').replace( - 'MM', r'(?P<month>[01]\d)').replace( - 'DD', r'(?P<day>[0123]\d)').replace( - 'OOO', r'(?P<ordinal>[0123]\d\d)').replace( - 'CC', r'(?P<century>\d\d$)') - + r'(T?(?P<hour>\d{2}):(?P<minute>\d{2})' - + r'(:(?P<second>\d{2}))?' - + r'(\.(?P<fracsecond>\d+))?' - + r'(?P<tz>[+-](?P<tzhour>\d{2})(:(?P<tzmin>\d{2}))?|Z)?)?' - for tmpl in _iso8601_tmpl] -try: - del tmpl -except NameError: - pass -_iso8601_matches = [re.compile(regex).match for regex in _iso8601_re] -try: - del regex -except NameError: - pass -def _parse_date_iso8601(dateString): - '''Parse a variety of ISO-8601-compatible formats like 20040105''' - m = None - for _iso8601_match in _iso8601_matches: - m = _iso8601_match(dateString) - if m: break - if not m: return - if m.span() == (0, 0): return - params = m.groupdict() - ordinal = params.get('ordinal', 0) - if ordinal: - ordinal = int(ordinal) - else: - ordinal = 0 - year = params.get('year', '--') - if not year or year == '--': - year = time.gmtime()[0] - elif len(year) == 2: - # ISO 8601 assumes current century, i.e. 
93 -> 2093, NOT 1993 - year = 100 * int(time.gmtime()[0] / 100) + int(year) - else: - year = int(year) - month = params.get('month', '-') - if not month or month == '-': - # ordinals are NOT normalized by mktime, we simulate them - # by setting month=1, day=ordinal - if ordinal: - month = 1 - else: - month = time.gmtime()[1] - month = int(month) - day = params.get('day', 0) - if not day: - # see above - if ordinal: - day = ordinal - elif params.get('century', 0) or \ - params.get('year', 0) or params.get('month', 0): - day = 1 - else: - day = time.gmtime()[2] - else: - day = int(day) - # special case of the century - is the first year of the 21st century - # 2000 or 2001 ? The debate goes on... - if 'century' in params.keys(): - year = (int(params['century']) - 1) * 100 + 1 - # in ISO 8601 most fields are optional - for field in ['hour', 'minute', 'second', 'tzhour', 'tzmin']: - if not params.get(field, None): - params[field] = 0 - hour = int(params.get('hour', 0)) - minute = int(params.get('minute', 0)) - second = int(float(params.get('second', 0))) - # weekday is normalized by mktime(), we can ignore it - weekday = 0 - daylight_savings_flag = -1 - tm = [year, month, day, hour, minute, second, weekday, - ordinal, daylight_savings_flag] - # ISO 8601 time zone adjustments - tz = params.get('tz') - if tz and tz != 'Z': - if tz[0] == '-': - tm[3] += int(params.get('tzhour', 0)) - tm[4] += int(params.get('tzmin', 0)) - elif tz[0] == '+': - tm[3] -= int(params.get('tzhour', 0)) - tm[4] -= int(params.get('tzmin', 0)) - else: - return None - # Python's time.mktime() is a wrapper around the ANSI C mktime(3c) - # which is guaranteed to normalize d/m/y/h/m/s. - # Many implementations have bugs, but we'll pretend they don't. - return time.localtime(time.mktime(tuple(tm))) -registerDateHandler(_parse_date_iso8601) - -# 8-bit date handling routines written by ytrewq1. 
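
[The locale-specific handlers below all follow the same plug-in contract established by registerDateHandler: a handler takes the raw date string, returns None when it does not recognize the format (so the next handler is tried), and otherwise rebuilds the date as a W3DTF or RFC 822 string and delegates to _parse_date_w3dtf or _parse_date_rfc822, which produce the required 9-tuple in GMT. A minimal sketch of such a handler; the "DD.MM.YYYY" format and the name _parse_date_dotted are hypothetical, not part of feedparser:

    _dotted_date_re = re.compile(r'(\d{2})\.(\d{2})\.(\d{4})\s+(\d{2}):(\d{2}):(\d{2})')

    def _parse_date_dotted(dateString):
        '''Parse a hypothetical "DD.MM.YYYY hh:mm:ss" format, assumed to be UTC'''
        m = _dotted_date_re.match(dateString)
        if not m:
            return  # contract: None means "not my format"
        # rebuild as W3DTF and let _parse_date_w3dtf produce the 9-tuple in GMT
        w3dtfdate = '%s-%s-%sT%s:%s:%s+00:00' % (m.group(3), m.group(2), m.group(1),
                                                 m.group(4), m.group(5), m.group(6))
        return _parse_date_w3dtf(w3dtfdate)
    registerDateHandler(_parse_date_dotted)  # insert(0, ...): runs before the built-ins

]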
-_korean_year = u'\ub144' # b3e2 in euc-kr -_korean_month = u'\uc6d4' # bff9 in euc-kr -_korean_day = u'\uc77c' # c0cf in euc-kr -_korean_am = u'\uc624\uc804' # bfc0 c0fc in euc-kr -_korean_pm = u'\uc624\ud6c4' # bfc0 c8c4 in euc-kr - -_korean_onblog_date_re = \ - re.compile('(\d{4})%s\s+(\d{2})%s\s+(\d{2})%s\s+(\d{2}):(\d{2}):(\d{2})' % \ - (_korean_year, _korean_month, _korean_day)) -_korean_nate_date_re = \ - re.compile(u'(\d{4})-(\d{2})-(\d{2})\s+(%s|%s)\s+(\d{,2}):(\d{,2}):(\d{,2})' % \ - (_korean_am, _korean_pm)) -def _parse_date_onblog(dateString): - '''Parse a string according to the OnBlog 8-bit date format''' - m = _korean_onblog_date_re.match(dateString) - if not m: return - w3dtfdate = '%(year)s-%(month)s-%(day)sT%(hour)s:%(minute)s:%(second)s%(zonediff)s' % \ - {'year': m.group(1), 'month': m.group(2), 'day': m.group(3),\ - 'hour': m.group(4), 'minute': m.group(5), 'second': m.group(6),\ - 'zonediff': '+09:00'} - if _debug: sys.stderr.write('OnBlog date parsed as: %s\n' % w3dtfdate) - return _parse_date_w3dtf(w3dtfdate) -registerDateHandler(_parse_date_onblog) - -def _parse_date_nate(dateString): - '''Parse a string according to the Nate 8-bit date format''' - m = _korean_nate_date_re.match(dateString) - if not m: return - hour = int(m.group(5)) - ampm = m.group(4) - if (ampm == _korean_pm): - hour += 12 - hour = str(hour) - if len(hour) == 1: - hour = '0' + hour - w3dtfdate = '%(year)s-%(month)s-%(day)sT%(hour)s:%(minute)s:%(second)s%(zonediff)s' % \ - {'year': m.group(1), 'month': m.group(2), 'day': m.group(3),\ - 'hour': hour, 'minute': m.group(6), 'second': m.group(7),\ - 'zonediff': '+09:00'} - if _debug: sys.stderr.write('Nate date parsed as: %s\n' % w3dtfdate) - return _parse_date_w3dtf(w3dtfdate) -registerDateHandler(_parse_date_nate) - -_mssql_date_re = \ - re.compile('(\d{4})-(\d{2})-(\d{2})\s+(\d{2}):(\d{2}):(\d{2})(\.\d+)?') -def _parse_date_mssql(dateString): - '''Parse a string according to the MS SQL date format''' - m = _mssql_date_re.match(dateString) - if not m: return - w3dtfdate = '%(year)s-%(month)s-%(day)sT%(hour)s:%(minute)s:%(second)s%(zonediff)s' % \ - {'year': m.group(1), 'month': m.group(2), 'day': m.group(3),\ - 'hour': m.group(4), 'minute': m.group(5), 'second': m.group(6),\ - 'zonediff': '+09:00'} - if _debug: sys.stderr.write('MS SQL date parsed as: %s\n' % w3dtfdate) - return _parse_date_w3dtf(w3dtfdate) -registerDateHandler(_parse_date_mssql) - -# Unicode strings for Greek date strings -_greek_months = \ - { \ - u'\u0399\u03b1\u03bd': u'Jan', # c9e1ed in iso-8859-7 - u'\u03a6\u03b5\u03b2': u'Feb', # d6e5e2 in iso-8859-7 - u'\u039c\u03ac\u03ce': u'Mar', # ccdcfe in iso-8859-7 - u'\u039c\u03b1\u03ce': u'Mar', # cce1fe in iso-8859-7 - u'\u0391\u03c0\u03c1': u'Apr', # c1f0f1 in iso-8859-7 - u'\u039c\u03ac\u03b9': u'May', # ccdce9 in iso-8859-7 - u'\u039c\u03b1\u03ca': u'May', # cce1fa in iso-8859-7 - u'\u039c\u03b1\u03b9': u'May', # cce1e9 in iso-8859-7 - u'\u0399\u03bf\u03cd\u03bd': u'Jun', # c9effded in iso-8859-7 - u'\u0399\u03bf\u03bd': u'Jun', # c9efed in iso-8859-7 - u'\u0399\u03bf\u03cd\u03bb': u'Jul', # c9effdeb in iso-8859-7 - u'\u0399\u03bf\u03bb': u'Jul', # c9f9eb in iso-8859-7 - u'\u0391\u03cd\u03b3': u'Aug', # c1fde3 in iso-8859-7 - u'\u0391\u03c5\u03b3': u'Aug', # c1f5e3 in iso-8859-7 - u'\u03a3\u03b5\u03c0': u'Sep', # d3e5f0 in iso-8859-7 - u'\u039f\u03ba\u03c4': u'Oct', # cfeaf4 in iso-8859-7 - u'\u039d\u03bf\u03ad': u'Nov', # cdefdd in iso-8859-7 - u'\u039d\u03bf\u03b5': u'Nov', # cdefe5 in iso-8859-7 - u'\u0394\u03b5\u03ba': 
u'Dec', # c4e5ea in iso-8859-7 - } - -_greek_wdays = \ - { \ - u'\u039a\u03c5\u03c1': u'Sun', # caf5f1 in iso-8859-7 - u'\u0394\u03b5\u03c5': u'Mon', # c4e5f5 in iso-8859-7 - u'\u03a4\u03c1\u03b9': u'Tue', # d4f1e9 in iso-8859-7 - u'\u03a4\u03b5\u03c4': u'Wed', # d4e5f4 in iso-8859-7 - u'\u03a0\u03b5\u03bc': u'Thu', # d0e5ec in iso-8859-7 - u'\u03a0\u03b1\u03c1': u'Fri', # d0e1f1 in iso-8859-7 - u'\u03a3\u03b1\u03b2': u'Sat', # d3e1e2 in iso-8859-7 - } - -_greek_date_format_re = \ - re.compile(u'([^,]+),\s+(\d{2})\s+([^\s]+)\s+(\d{4})\s+(\d{2}):(\d{2}):(\d{2})\s+([^\s]+)') - -def _parse_date_greek(dateString): - '''Parse a string according to a Greek 8-bit date format.''' - m = _greek_date_format_re.match(dateString) - if not m: return - try: - wday = _greek_wdays[m.group(1)] - month = _greek_months[m.group(3)] - except: - return - rfc822date = '%(wday)s, %(day)s %(month)s %(year)s %(hour)s:%(minute)s:%(second)s %(zonediff)s' % \ - {'wday': wday, 'day': m.group(2), 'month': month, 'year': m.group(4),\ - 'hour': m.group(5), 'minute': m.group(6), 'second': m.group(7),\ - 'zonediff': m.group(8)} - if _debug: sys.stderr.write('Greek date parsed as: %s\n' % rfc822date) - return _parse_date_rfc822(rfc822date) -registerDateHandler(_parse_date_greek) - -# Unicode strings for Hungarian date strings -_hungarian_months = \ - { \ - u'janu\u00e1r': u'01', # e1 in iso-8859-2 - u'febru\u00e1ri': u'02', # e1 in iso-8859-2 - u'm\u00e1rcius': u'03', # e1 in iso-8859-2 - u'\u00e1prilis': u'04', # e1 in iso-8859-2 - u'm\u00e1ujus': u'05', # e1 in iso-8859-2 - u'j\u00fanius': u'06', # fa in iso-8859-2 - u'j\u00falius': u'07', # fa in iso-8859-2 - u'augusztus': u'08', - u'szeptember': u'09', - u'okt\u00f3ber': u'10', # f3 in iso-8859-2 - u'november': u'11', - u'december': u'12', - } - -_hungarian_date_format_re = \ - re.compile(u'(\d{4})-([^-]+)-(\d{,2})T(\d{,2}):(\d{2})((\+|-)(\d{,2}:\d{2}))') - -def _parse_date_hungarian(dateString): - '''Parse a string according to a Hungarian 8-bit date format.''' - m = _hungarian_date_format_re.match(dateString) - if not m: return - try: - month = _hungarian_months[m.group(2)] - day = m.group(3) - if len(day) == 1: - day = '0' + day - hour = m.group(4) - if len(hour) == 1: - hour = '0' + hour - except: - return - w3dtfdate = '%(year)s-%(month)s-%(day)sT%(hour)s:%(minute)s%(zonediff)s' % \ - {'year': m.group(1), 'month': month, 'day': day,\ - 'hour': hour, 'minute': m.group(5),\ - 'zonediff': m.group(6)} - if _debug: sys.stderr.write('Hungarian date parsed as: %s\n' % w3dtfdate) - return _parse_date_w3dtf(w3dtfdate) -registerDateHandler(_parse_date_hungarian) - -# W3DTF-style date parsing adapted from PyXML xml.utils.iso8601, written by -# Drake and licensed under the Python license. 
Removed all range checking -# for month, day, hour, minute, and second, since mktime will normalize -# these later -def _parse_date_w3dtf(dateString): - def __extract_date(m): - year = int(m.group('year')) - if year < 100: - year = 100 * int(time.gmtime()[0] / 100) + int(year) - if year < 1000: - return 0, 0, 0 - julian = m.group('julian') - if julian: - julian = int(julian) - month = julian / 30 + 1 - day = julian % 30 + 1 - jday = None - while jday != julian: - t = time.mktime((year, month, day, 0, 0, 0, 0, 0, 0)) - jday = time.gmtime(t)[-2] - diff = abs(jday - julian) - if jday > julian: - if diff < day: - day = day - diff - else: - month = month - 1 - day = 31 - elif jday < julian: - if day + diff < 28: - day = day + diff - else: - month = month + 1 - return year, month, day - month = m.group('month') - day = 1 - if month is None: - month = 1 - else: - month = int(month) - day = m.group('day') - if day: - day = int(day) - else: - day = 1 - return year, month, day - - def __extract_time(m): - if not m: - return 0, 0, 0 - hours = m.group('hours') - if not hours: - return 0, 0, 0 - hours = int(hours) - minutes = int(m.group('minutes')) - seconds = m.group('seconds') - if seconds: - seconds = int(seconds) - else: - seconds = 0 - return hours, minutes, seconds - - def __extract_tzd(m): - '''Return the Time Zone Designator as an offset in seconds from UTC.''' - if not m: - return 0 - tzd = m.group('tzd') - if not tzd: - return 0 - if tzd == 'Z': - return 0 - hours = int(m.group('tzdhours')) - minutes = m.group('tzdminutes') - if minutes: - minutes = int(minutes) - else: - minutes = 0 - offset = (hours*60 + minutes) * 60 - if tzd[0] == '+': - return -offset - return offset - - __date_re = ('(?P<year>\d\d\d\d)' - '(?:(?P<dsep>-|)' - '(?:(?P<month>\d\d)(?:(?P=dsep)(?P<day>\d\d))?' - '|(?P<julian>\d\d\d)))?') - __tzd_re = '(?P<tzd>[-+](?P<tzdhours>\d\d)(?::?(?P<tzdminutes>\d\d))|Z)' - __tzd_rx = re.compile(__tzd_re) - __time_re = ('(?P<hours>\d\d)(?P<tsep>:|)(?P<minutes>\d\d)' - '(?:(?P=tsep)(?P<seconds>\d\d)(?:[.,]\d+)?)?' - + __tzd_re) - __datetime_re = '%s(?:T%s)?' % (__date_re, __time_re) - __datetime_rx = re.compile(__datetime_re) - m = __datetime_rx.match(dateString) - if (m is None) or (m.group() != dateString): return - gmt = __extract_date(m) + __extract_time(m) + (0, 0, 0) - if gmt[0] == 0: return - return time.gmtime(time.mktime(gmt) + __extract_tzd(m) - time.timezone) -registerDateHandler(_parse_date_w3dtf) - -def _parse_date_rfc822(dateString): - '''Parse an RFC822, RFC1123, RFC2822, or asctime-style date''' - data = dateString.split() - if data[0][-1] in (',', '.') or data[0].lower() in rfc822._daynames: - del data[0] - if len(data) == 4: - s = data[3] - i = s.find('+') - if i > 0: - data[3:] = [s[:i], s[i+1:]] - else: - data.append('') - dateString = " ".join(data) - # Account for the Etc/GMT timezone by stripping 'Etc/' - elif len(data) == 5 and data[4].lower().startswith('etc/'): - data[4] = data[4][4:] - dateString = " ".join(data) - if len(data) < 5: - dateString += ' 00:00:00 GMT' - tm = rfc822.parsedate_tz(dateString) - if tm: - return time.gmtime(rfc822.mktime_tz(tm)) -# rfc822.py defines several time zones, but we define some extra ones. -# 'ET' is equivalent to 'EST', etc. 
-_additional_timezones = {'AT': -400, 'ET': -500, 'CT': -600, 'MT': -700, 'PT': -800} -rfc822._timezones.update(_additional_timezones) -registerDateHandler(_parse_date_rfc822) - -def _parse_date_perforce(aDateString): - """parse a date in yyyy/mm/dd hh:mm:ss TTT format""" - # Fri, 2006/09/15 08:19:53 EDT - _my_date_pattern = re.compile( \ - r'(\w{,3}), (\d{,4})/(\d{,2})/(\d{2}) (\d{,2}):(\d{2}):(\d{2}) (\w{,3})') - - dow, year, month, day, hour, minute, second, tz = \ - _my_date_pattern.search(aDateString).groups() - months = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec'] - dateString = "%s, %s %s %s %s:%s:%s %s" % (dow, day, months[int(month) - 1], year, hour, minute, second, tz) - tm = rfc822.parsedate_tz(dateString) - if tm: - return time.gmtime(rfc822.mktime_tz(tm)) -registerDateHandler(_parse_date_perforce) - -def _parse_date(dateString): - '''Parses a variety of date formats into a 9-tuple in GMT''' - for handler in _date_handlers: - try: - date9tuple = handler(dateString) - if not date9tuple: continue - if len(date9tuple) != 9: - if _debug: sys.stderr.write('date handler function must return 9-tuple\n') - raise ValueError - map(int, date9tuple) - return date9tuple - except Exception, e: - if _debug: sys.stderr.write('%s raised %s\n' % (handler.__name__, repr(e))) - pass - return None - -def _getCharacterEncoding(http_headers, xml_data): - '''Get the character encoding of the XML document - - http_headers is a dictionary - xml_data is a raw string (not Unicode) - - This is so much trickier than it sounds, it's not even funny. - According to RFC 3023 ('XML Media Types'), if the HTTP Content-Type - is application/xml, application/*+xml, - application/xml-external-parsed-entity, or application/xml-dtd, - the encoding given in the charset parameter of the HTTP Content-Type - takes precedence over the encoding given in the XML prefix within the - document, and defaults to 'utf-8' if neither are specified. But, if - the HTTP Content-Type is text/xml, text/*+xml, or - text/xml-external-parsed-entity, the encoding given in the XML prefix - within the document is ALWAYS IGNORED and only the encoding given in - the charset parameter of the HTTP Content-Type header should be - respected, and it defaults to 'us-ascii' if not specified. - - Furthermore, discussion on the atom-syntax mailing list with the - author of RFC 3023 leads me to the conclusion that any document - served with a Content-Type of text/* and no charset parameter - must be treated as us-ascii. (We now do this.) And also that it - must always be flagged as non-well-formed. (We now do this too.) - - If Content-Type is unspecified (input was local file or non-HTTP source) - or unrecognized (server just got it totally wrong), then go by the - encoding given in the XML prefix of the document and default to - 'iso-8859-1' as per the HTTP specification (RFC 2616). - - Then, assuming we didn't find a character encoding in the HTTP headers - (and the HTTP Content-type allowed us to look in the body), we need - to sniff the first few bytes of the XML data and try to determine - whether the encoding is ASCII-compatible. Section F of the XML - specification shows the way here: - http://www.w3.org/TR/REC-xml/#sec-guessing-no-ext-info - - If the sniffed encoding is not ASCII-compatible, we need to make it - ASCII compatible so that we can sniff further into the XML declaration - to find the encoding attribute, which will tell us the true encoding. 
- - Of course, none of this guarantees that we will be able to parse the - feed in the declared character encoding (assuming it was declared - correctly, which many are not). CJKCodecs and iconv_codec help a lot; - you should definitely install them if you can. - http://cjkpython.i18n.org/ - ''' - - def _parseHTTPContentType(content_type): - '''takes HTTP Content-Type header and returns (content type, charset) - - If no charset is specified, returns (content type, '') - If no content type is specified, returns ('', '') - Both return parameters are guaranteed to be lowercase strings - ''' - content_type = content_type or '' - content_type, params = cgi.parse_header(content_type) - return content_type, params.get('charset', '').replace("'", '') - - sniffed_xml_encoding = '' - xml_encoding = '' - true_encoding = '' - http_content_type, http_encoding = _parseHTTPContentType(http_headers.get('content-type', http_headers.get('Content-type'))) - # Must sniff for non-ASCII-compatible character encodings before - # searching for XML declaration. This heuristic is defined in - # section F of the XML specification: - # http://www.w3.org/TR/REC-xml/#sec-guessing-no-ext-info - try: - if xml_data[:4] == _l2bytes([0x4c, 0x6f, 0xa7, 0x94]): - # EBCDIC - xml_data = _ebcdic_to_ascii(xml_data) - elif xml_data[:4] == _l2bytes([0x00, 0x3c, 0x00, 0x3f]): - # UTF-16BE - sniffed_xml_encoding = 'utf-16be' - xml_data = unicode(xml_data, 'utf-16be').encode('utf-8') - elif (len(xml_data) >= 4) and (xml_data[:2] == _l2bytes([0xfe, 0xff])) and (xml_data[2:4] != _l2bytes([0x00, 0x00])): - # UTF-16BE with BOM - sniffed_xml_encoding = 'utf-16be' - xml_data = unicode(xml_data[2:], 'utf-16be').encode('utf-8') - elif xml_data[:4] == _l2bytes([0x3c, 0x00, 0x3f, 0x00]): - # UTF-16LE - sniffed_xml_encoding = 'utf-16le' - xml_data = unicode(xml_data, 'utf-16le').encode('utf-8') - elif (len(xml_data) >= 4) and (xml_data[:2] == _l2bytes([0xff, 0xfe])) and (xml_data[2:4] != _l2bytes([0x00, 0x00])): - # UTF-16LE with BOM - sniffed_xml_encoding = 'utf-16le' - xml_data = unicode(xml_data[2:], 'utf-16le').encode('utf-8') - elif xml_data[:4] == _l2bytes([0x00, 0x00, 0x00, 0x3c]): - # UTF-32BE - sniffed_xml_encoding = 'utf-32be' - xml_data = unicode(xml_data, 'utf-32be').encode('utf-8') - elif xml_data[:4] == _l2bytes([0x3c, 0x00, 0x00, 0x00]): - # UTF-32LE - sniffed_xml_encoding = 'utf-32le' - xml_data = unicode(xml_data, 'utf-32le').encode('utf-8') - elif xml_data[:4] == _l2bytes([0x00, 0x00, 0xfe, 0xff]): - # UTF-32BE with BOM - sniffed_xml_encoding = 'utf-32be' - xml_data = unicode(xml_data[4:], 'utf-32be').encode('utf-8') - elif xml_data[:4] == _l2bytes([0xff, 0xfe, 0x00, 0x00]): - # UTF-32LE with BOM - sniffed_xml_encoding = 'utf-32le' - xml_data = unicode(xml_data[4:], 'utf-32le').encode('utf-8') - elif xml_data[:3] == _l2bytes([0xef, 0xbb, 0xbf]): - # UTF-8 with BOM - sniffed_xml_encoding = 'utf-8' - xml_data = unicode(xml_data[3:], 'utf-8').encode('utf-8') - else: - # ASCII-compatible - pass - xml_encoding_match = re.compile(_s2bytes('^<\?.*encoding=[\'"](.*?)[\'"].*\?>')).match(xml_data) - except: - xml_encoding_match = None - if xml_encoding_match: - xml_encoding = xml_encoding_match.groups()[0].decode('utf-8').lower() - if sniffed_xml_encoding and (xml_encoding in ('iso-10646-ucs-2', 'ucs-2', 'csunicode', 'iso-10646-ucs-4', 'ucs-4', 'csucs4', 'utf-16', 'utf-32', 'utf_16', 'utf_32', 'utf16', 'u16')): - xml_encoding = sniffed_xml_encoding - acceptable_content_type = 0 - application_content_types = ('application/xml', 
'application/xml-dtd', 'application/xml-external-parsed-entity') - text_content_types = ('text/xml', 'text/xml-external-parsed-entity') - if (http_content_type in application_content_types) or \ - (http_content_type.startswith('application/') and http_content_type.endswith('+xml')): - acceptable_content_type = 1 - true_encoding = http_encoding or xml_encoding or 'utf-8' - elif (http_content_type in text_content_types) or \ - (http_content_type.startswith('text/')) and http_content_type.endswith('+xml'): - acceptable_content_type = 1 - true_encoding = http_encoding or 'us-ascii' - elif http_content_type.startswith('text/'): - true_encoding = http_encoding or 'us-ascii' - elif http_headers and (not (http_headers.has_key('content-type') or http_headers.has_key('Content-type'))): - true_encoding = xml_encoding or 'iso-8859-1' - else: - true_encoding = xml_encoding or 'utf-8' - # some feeds claim to be gb2312 but are actually gb18030. - # apparently MSIE and Firefox both do the following switch: - if true_encoding.lower() == 'gb2312': - true_encoding = 'gb18030' - return true_encoding, http_encoding, xml_encoding, sniffed_xml_encoding, acceptable_content_type - -def _toUTF8(data, encoding): - '''Changes an XML data stream on the fly to specify a new encoding - - data is a raw sequence of bytes (not Unicode) that is presumed to be in %encoding already - encoding is a string recognized by encodings.aliases - ''' - if _debug: sys.stderr.write('entering _toUTF8, trying encoding %s\n' % encoding) - # strip Byte Order Mark (if present) - if (len(data) >= 4) and (data[:2] == _l2bytes([0xfe, 0xff])) and (data[2:4] != _l2bytes([0x00, 0x00])): - if _debug: - sys.stderr.write('stripping BOM\n') - if encoding != 'utf-16be': - sys.stderr.write('trying utf-16be instead\n') - encoding = 'utf-16be' - data = data[2:] - elif (len(data) >= 4) and (data[:2] == _l2bytes([0xff, 0xfe])) and (data[2:4] != _l2bytes([0x00, 0x00])): - if _debug: - sys.stderr.write('stripping BOM\n') - if encoding != 'utf-16le': - sys.stderr.write('trying utf-16le instead\n') - encoding = 'utf-16le' - data = data[2:] - elif data[:3] == _l2bytes([0xef, 0xbb, 0xbf]): - if _debug: - sys.stderr.write('stripping BOM\n') - if encoding != 'utf-8': - sys.stderr.write('trying utf-8 instead\n') - encoding = 'utf-8' - data = data[3:] - elif data[:4] == _l2bytes([0x00, 0x00, 0xfe, 0xff]): - if _debug: - sys.stderr.write('stripping BOM\n') - if encoding != 'utf-32be': - sys.stderr.write('trying utf-32be instead\n') - encoding = 'utf-32be' - data = data[4:] - elif data[:4] == _l2bytes([0xff, 0xfe, 0x00, 0x00]): - if _debug: - sys.stderr.write('stripping BOM\n') - if encoding != 'utf-32le': - sys.stderr.write('trying utf-32le instead\n') - encoding = 'utf-32le' - data = data[4:] - newdata = unicode(data, encoding) - if _debug: sys.stderr.write('successfully converted %s data to unicode\n' % encoding) - declmatch = re.compile('^<\?xml[^>]*?>') - newdecl = '''<?xml version='1.0' encoding='utf-8'?>''' - if declmatch.search(newdata): - newdata = declmatch.sub(newdecl, newdata) - else: - newdata = newdecl + u'\n' + newdata - return newdata.encode('utf-8') - -def _stripDoctype(data): - '''Strips DOCTYPE from XML document, returns (rss_version, stripped_data) - - rss_version may be 'rss091n' or None - stripped_data is the same XML document, minus the DOCTYPE - ''' - start = re.search(_s2bytes('<\w'), data) - start = start and start.start() or -1 - head,data = data[:start+1], data[start+1:] - - entity_pattern = re.compile(_s2bytes(r'^\s*<!ENTITY([^>]*?)>'), 
re.MULTILINE) - entity_results=entity_pattern.findall(head) - head = entity_pattern.sub(_s2bytes(''), head) - doctype_pattern = re.compile(_s2bytes(r'^\s*<!DOCTYPE([^>]*?)>'), re.MULTILINE) - doctype_results = doctype_pattern.findall(head) - doctype = doctype_results and doctype_results[0] or _s2bytes('') - if doctype.lower().count(_s2bytes('netscape')): - version = 'rss091n' - else: - version = None - - # only allow in 'safe' inline entity definitions - replacement=_s2bytes('') - if len(doctype_results)==1 and entity_results: - safe_pattern=re.compile(_s2bytes('\s+(\w+)\s+"(&#\w+;|[^&"]*)"')) - safe_entities=filter(lambda e: safe_pattern.match(e),entity_results) - if safe_entities: - replacement=_s2bytes('<!DOCTYPE feed [\n <!ENTITY') + _s2bytes('>\n <!ENTITY ').join(safe_entities) + _s2bytes('>\n]>') - data = doctype_pattern.sub(replacement, head) + data - - return version, data, dict(replacement and [(k.decode('utf-8'), v.decode('utf-8')) for k, v in safe_pattern.findall(replacement)]) - -def parse(url_file_stream_or_string, etag=None, modified=None, agent=None, referrer=None, handlers=[], request_headers={}, response_headers={}): - '''Parse a feed from a URL, file, stream, or string. - - request_headers, if given, is a dict from http header name to value to add - to the request; this overrides internally generated values. - ''' - result = FeedParserDict() - result['feed'] = FeedParserDict() - result['entries'] = [] - if _XML_AVAILABLE: - result['bozo'] = 0 - if not isinstance(handlers, list): - handlers = [handlers] - try: - f = _open_resource(url_file_stream_or_string, etag, modified, agent, referrer, handlers, request_headers) - data = f.read() - except Exception, e: - result['bozo'] = 1 - result['bozo_exception'] = e - data = None - f = None - - if hasattr(f, 'headers'): - result['headers'] = dict(f.headers) - # overwrite existing headers using response_headers - if 'headers' in result: - result['headers'].update(response_headers) - elif response_headers: - result['headers'] = copy.deepcopy(response_headers) - - # if feed is gzip-compressed, decompress it - if f and data and 'headers' in result: - if gzip and result['headers'].get('content-encoding') == 'gzip': - try: - data = gzip.GzipFile(fileobj=_StringIO(data)).read() - except Exception, e: - # Some feeds claim to be gzipped but they're not, so - # we get garbage. Ideally, we should re-request the - # feed without the 'Accept-encoding: gzip' header, - # but we don't. 
- result['bozo'] = 1 - result['bozo_exception'] = e - data = '' - elif zlib and result['headers'].get('content-encoding') == 'deflate': - try: - data = zlib.decompress(data, -zlib.MAX_WBITS) - except Exception, e: - result['bozo'] = 1 - result['bozo_exception'] = e - data = '' - - # save HTTP headers - if 'headers' in result: - if 'etag' in result['headers'] or 'ETag' in result['headers']: - etag = result['headers'].get('etag', result['headers'].get('ETag')) - if etag: - result['etag'] = etag - if 'last-modified' in result['headers'] or 'Last-Modified' in result['headers']: - modified = result['headers'].get('last-modified', result['headers'].get('Last-Modified')) - if modified: - result['modified'] = _parse_date(modified) - if hasattr(f, 'url'): - result['href'] = f.url - result['status'] = 200 - if hasattr(f, 'status'): - result['status'] = f.status - if hasattr(f, 'close'): - f.close() - - # there are four encodings to keep track of: - # - http_encoding is the encoding declared in the Content-Type HTTP header - # - xml_encoding is the encoding declared in the <?xml declaration - # - sniffed_encoding is the encoding sniffed from the first 4 bytes of the XML data - # - result['encoding'] is the actual encoding, as per RFC 3023 and a variety of other conflicting specifications - http_headers = result.get('headers', {}) - result['encoding'], http_encoding, xml_encoding, sniffed_xml_encoding, acceptable_content_type = \ - _getCharacterEncoding(http_headers, data) - if http_headers and (not acceptable_content_type): - if http_headers.has_key('content-type') or http_headers.has_key('Content-type'): - bozo_message = '%s is not an XML media type' % http_headers.get('content-type', http_headers.get('Content-type')) - else: - bozo_message = 'no Content-type specified' - result['bozo'] = 1 - result['bozo_exception'] = NonXMLContentType(bozo_message) - - if data is not None: - result['version'], data, entities = _stripDoctype(data) - - # ensure that baseuri is an absolute uri using an acceptable URI scheme - contentloc = http_headers.get('content-location', http_headers.get('Content-Location', '')) - href = result.get('href', '') - baseuri = _makeSafeAbsoluteURI(href, contentloc) or _makeSafeAbsoluteURI(contentloc) or href - - baselang = http_headers.get('content-language', http_headers.get('Content-Language', None)) - - # if server sent 304, we're done - if result.get('status', 0) == 304: - result['version'] = '' - result['debug_message'] = 'The feed has not changed since you last checked, ' + \ - 'so the server sent no data. This is a feature, not a bug!' 
- return result - - # if there was a problem downloading, we're done - if data is None: - return result - - # determine character encoding - use_strict_parser = 0 - known_encoding = 0 - tried_encodings = [] - # try: HTTP encoding, declared XML encoding, encoding sniffed from BOM - for proposed_encoding in (result['encoding'], xml_encoding, sniffed_xml_encoding): - if not proposed_encoding: continue - if proposed_encoding in tried_encodings: continue - tried_encodings.append(proposed_encoding) - try: - data = _toUTF8(data, proposed_encoding) - known_encoding = use_strict_parser = 1 - break - except: - pass - # if no luck and we have auto-detection library, try that - if (not known_encoding) and chardet: - try: - proposed_encoding = chardet.detect(data)['encoding'] - if proposed_encoding and (proposed_encoding not in tried_encodings): - tried_encodings.append(proposed_encoding) - data = _toUTF8(data, proposed_encoding) - known_encoding = use_strict_parser = 1 - except: - pass - # if still no luck and we haven't tried utf-8 yet, try that - if (not known_encoding) and ('utf-8' not in tried_encodings): - try: - proposed_encoding = 'utf-8' - tried_encodings.append(proposed_encoding) - data = _toUTF8(data, proposed_encoding) - known_encoding = use_strict_parser = 1 - except: - pass - # if still no luck and we haven't tried windows-1252 yet, try that - if (not known_encoding) and ('windows-1252' not in tried_encodings): - try: - proposed_encoding = 'windows-1252' - tried_encodings.append(proposed_encoding) - data = _toUTF8(data, proposed_encoding) - known_encoding = use_strict_parser = 1 - except: - pass - # if still no luck and we haven't tried iso-8859-2 yet, try that. - if (not known_encoding) and ('iso-8859-2' not in tried_encodings): - try: - proposed_encoding = 'iso-8859-2' - tried_encodings.append(proposed_encoding) - data = _toUTF8(data, proposed_encoding) - known_encoding = use_strict_parser = 1 - except: - pass - # if still no luck, give up - if not known_encoding: - result['bozo'] = 1 - result['bozo_exception'] = CharacterEncodingUnknown( \ - 'document encoding unknown, I tried ' + \ - '%s, %s, utf-8, windows-1252, and iso-8859-2 but nothing worked' % \ - (result['encoding'], xml_encoding)) - result['encoding'] = '' - elif proposed_encoding != result['encoding']: - result['bozo'] = 1 - result['bozo_exception'] = CharacterEncodingOverride( \ - 'document declared as %s, but parsed as %s' % \ - (result['encoding'], proposed_encoding)) - result['encoding'] = proposed_encoding - - if not _XML_AVAILABLE: - use_strict_parser = 0 - if use_strict_parser: - # initialize the SAX parser - feedparser = _StrictFeedParser(baseuri, baselang, 'utf-8') - saxparser = xml.sax.make_parser(PREFERRED_XML_PARSERS) - saxparser.setFeature(xml.sax.handler.feature_namespaces, 1) - saxparser.setContentHandler(feedparser) - saxparser.setErrorHandler(feedparser) - source = xml.sax.xmlreader.InputSource() - source.setByteStream(_StringIO(data)) - if hasattr(saxparser, '_ns_stack'): - # work around bug in built-in SAX parser (doesn't recognize xml: namespace) - # PyXML doesn't have this problem, and it doesn't have _ns_stack either - saxparser._ns_stack.append({'http://www.w3.org/XML/1998/namespace':'xml'}) - try: - saxparser.parse(source) - except Exception, e: - if _debug: - import traceback - traceback.print_stack() - traceback.print_exc() - sys.stderr.write('xml parsing failed\n') - result['bozo'] = 1 - result['bozo_exception'] = feedparser.exc or e - use_strict_parser = 0 - if not use_strict_parser: - feedparser = 
_LooseFeedParser(baseuri, baselang, 'utf-8', entities) - feedparser.feed(data.decode('utf-8', 'replace')) - result['feed'] = feedparser.feeddata - result['entries'] = feedparser.entries - result['version'] = result['version'] or feedparser.version - result['namespaces'] = feedparser.namespacesInUse - return result - -class Serializer: - def __init__(self, results): - self.results = results - -class TextSerializer(Serializer): - def write(self, stream=sys.stdout): - self._writer(stream, self.results, '') - - def _writer(self, stream, node, prefix): - if not node: return - if hasattr(node, 'keys'): - keys = node.keys() - keys.sort() - for k in keys: - if k in ('description', 'link'): continue - if node.has_key(k + '_detail'): continue - if node.has_key(k + '_parsed'): continue - self._writer(stream, node[k], prefix + k + '.') - elif type(node) == types.ListType: - index = 0 - for n in node: - self._writer(stream, n, prefix[:-1] + '[' + str(index) + '].') - index += 1 - else: - try: - s = str(node).encode('utf-8') - s = s.replace('\\', '\\\\') - s = s.replace('\r', '') - s = s.replace('\n', r'\n') - stream.write(prefix[:-1]) - stream.write('=') - stream.write(s) - stream.write('\n') - except: - pass - -class PprintSerializer(Serializer): - def write(self, stream=sys.stdout): - if self.results.has_key('href'): - stream.write(self.results['href'] + '\n\n') - from pprint import pprint - pprint(self.results, stream) - stream.write('\n') - -if __name__ == '__main__': - try: - from optparse import OptionParser - except: - OptionParser = None - - if OptionParser: - optionParser = OptionParser(version=__version__, usage="%prog [options] url_or_filename_or_-") - optionParser.set_defaults(format="pprint") - optionParser.add_option("-A", "--user-agent", dest="agent", metavar="AGENT", help="User-Agent for HTTP URLs") - optionParser.add_option("-e", "--referer", "--referrer", dest="referrer", metavar="URL", help="Referrer for HTTP URLs") - optionParser.add_option("-t", "--etag", dest="etag", metavar="TAG", help="ETag/If-None-Match for HTTP URLs") - optionParser.add_option("-m", "--last-modified", dest="modified", metavar="DATE", help="Last-modified/If-Modified-Since for HTTP URLs (any supported date format)") - optionParser.add_option("-f", "--format", dest="format", metavar="FORMAT", help="output results in FORMAT (text, pprint)") - optionParser.add_option("-v", "--verbose", action="store_true", dest="verbose", default=False, help="write debugging information to stderr") - (options, urls) = optionParser.parse_args() - if options.verbose: - _debug = 1 - if not urls: - optionParser.print_help() - sys.exit(0) - else: - if not sys.argv[1:]: - print __doc__ - sys.exit(0) - class _Options: - etag = modified = agent = referrer = None - format = 'pprint' - options = _Options() - urls = sys.argv[1:] - - zopeCompatibilityHack() - - serializer = globals().get(options.format.capitalize() + 'Serializer', Serializer) - for url in urls: - results = parse(url, etag=options.etag, modified=options.modified, agent=options.agent, referrer=options.referrer) - serializer(results).write(sys.stdout) diff --git a/module/lib/forwarder.py b/module/lib/forwarder.py new file mode 100644 index 000000000..eacb33c2b --- /dev/null +++ b/module/lib/forwarder.py @@ -0,0 +1,73 @@ +# -*- coding: utf-8 -*- + +""" + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 3 of the License, + or (at your option) any 
later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + See the GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, see <http://www.gnu.org/licenses/>. + + @author: RaNaN +""" + +from sys import argv +from sys import exit + +import socket +import thread + +from traceback import print_exc + +class Forwarder(): + + def __init__(self, extip,extport=9666): + print "Start portforwarding to %s:%s" % (extip, extport) + proxy(extip, extport, 9666) + + +def proxy(*settings): + while True: + server(*settings) + +def server(*settings): + try: + dock_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + dock_socket.bind(("127.0.0.1", settings[2])) + dock_socket.listen(5) + while True: + client_socket = dock_socket.accept()[0] + server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + server_socket.connect((settings[0], settings[1])) + thread.start_new_thread(forward, (client_socket, server_socket)) + thread.start_new_thread(forward, (server_socket, client_socket)) + except Exception: + print_exc() + + +def forward(source, destination): + string = ' ' + while string: + string = source.recv(1024) + if string: + destination.sendall(string) + else: + #source.shutdown(socket.SHUT_RD) + destination.shutdown(socket.SHUT_WR) + +if __name__ == "__main__": + args = argv[1:] + if not args: + print "Usage: forwarder.py <remote ip> <remote port>" + exit() + if len(args) == 1: + args.append(9666) + + f = Forwarder(args[0], int(args[1])) +
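
[Each accepted connection is serviced by two forward() pumps in separate threads, one per direction; a pump loops until recv() returns an empty string (the peer closed its sending side) and then half-closes its destination with shutdown(socket.SHUT_WR), so data still in flight the other way can drain. A quick smoke test, assuming the forwarder is already running and something answers on the remote side; the host name and payload are placeholders:

    # started beforehand: python forwarder.py remote.example.org 9666
    import socket

    c = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    c.connect(("127.0.0.1", 9666))  # local end of the tunnel
    c.sendall("ping\n")             # relayed to remote.example.org:9666
    print c.recv(1024)              # whatever the remote service replies
    c.close()

]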
\ No newline at end of file diff --git a/module/lib/hg_tool.py b/module/lib/hg_tool.py new file mode 100644 index 000000000..cd97833df --- /dev/null +++ b/module/lib/hg_tool.py @@ -0,0 +1,133 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- + +import re +from subprocess import Popen, PIPE +from time import time, gmtime, strftime + +aliases = {"zoidber": "zoidberg", "zoidberg10": "zoidberg", "webmaster": "dhmh", "mast3rranan": "ranan", + "ranan2": "ranan"} +exclude = ["locale/*", "module/lib/*"] +date_format = "%Y-%m-%d" +line_re = re.compile(r" (\d+) \**", re.I) + +def add_exclude_flags(args): + for dir in exclude: + args.extend(["-X", dir]) + +# remove small percentages +def wipe(data, perc=1): + s = (sum(data.values()) * perc) / 100 + for k, v in data.items(): + if v < s: del data[k] + + return data + +# remove aliases +def de_alias(data): + for k, v in aliases.iteritems(): + if k not in data: continue + alias = aliases[k] + + if alias in data: data[alias] += data[k] + else: data[alias] = data[k] + + del data[k] + + return data + + +def output(data): + s = float(sum(data.values())) + print "Total Lines: %d" % s + for k, v in data.iteritems(): + print "%15s: %.1f%% | %d" % (k, (v * 100) / s, v) + print + + +def file_list(): + args = ["hg", "status", "-A"] + add_exclude_flags(args) + p = Popen(args, stdout=PIPE) + out, err = p.communicate() + return [x.split()[1] for x in out.splitlines() if x.split()[0] in "CMA"] + + +def hg_annotate(path): + args = ["hg", "annotate", "-u", path] + p = Popen(args, stdout=PIPE) + out, err = p.communicate() + + data = {} + + for line in out.splitlines(): + author, non, line = line.partition(":") + + # probably binary file + if author == path: return {} + + author = author.strip().lower() + if not line.strip(): continue # don't count blank lines + + if author in data: data[author] += 1 + else: data[author] = 1 + + return de_alias(data) + + +def hg_churn(days=None): + args = ["hg", "churn"] + if days: + args.append("-d") + t = time() - 60 * 60 * 24 * days + args.append("%s to %s" % (strftime(date_format, gmtime(t)), strftime(date_format))) + + add_exclude_flags(args) + p = Popen(args, stdout=PIPE) + out, err = p.communicate() + + data = {} + + for line in out.splitlines(): + m = line_re.search(line) + author = line.split()[0] + lines = int(m.group(1)) + + if "@" in author: + author, n, email = author.partition("@") + + author = author.strip().lower() + + if author in data: data[author] += lines + else: data[author] = lines + + return de_alias(data) + + +def complete_annotate(): + files = file_list() + data = {} + for f in files: + tmp = hg_annotate(f) + for k, v in tmp.iteritems(): + if k in data: data[k] += v + else: data[k] = v + + return data + + +if __name__ == "__main__": + for d in (30, 90, 180): + c = wipe(hg_churn(d)) + print "Changes in %d days:" % d + output(c) + + c = wipe(hg_churn()) + print "Total changes:" + output(c) + + print "Current source code version:" + data = wipe(complete_annotate()) + output(data) + + diff --git a/module/lib/mod_pywebsocket/COPYING b/module/lib/mod_pywebsocket/COPYING new file mode 100644 index 000000000..989d02e4c --- /dev/null +++ b/module/lib/mod_pywebsocket/COPYING @@ -0,0 +1,28 @@ +Copyright 2012, Google Inc. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. 
+ * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/module/lib/mod_pywebsocket/__init__.py b/module/lib/mod_pywebsocket/__init__.py new file mode 100644 index 000000000..454ae0c45 --- /dev/null +++ b/module/lib/mod_pywebsocket/__init__.py @@ -0,0 +1,197 @@ +# Copyright 2011, Google Inc. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + +"""WebSocket extension for Apache HTTP Server. + +mod_pywebsocket is a WebSocket extension for Apache HTTP Server +intended for testing or experimental purposes. mod_python is required. + + +Installation +============ + +0. Prepare an Apache HTTP Server for which mod_python is enabled. + +1. Specify the following Apache HTTP Server directives to suit your + configuration. + + If mod_pywebsocket is not in the Python path, specify the following. + <websock_lib> is the directory where mod_pywebsocket is installed. + + PythonPath "sys.path+['<websock_lib>']" + + Always specify the following. 
<websock_handlers> is the directory where + user-written WebSocket handlers are placed. + + PythonOption mod_pywebsocket.handler_root <websock_handlers> + PythonHeaderParserHandler mod_pywebsocket.headerparserhandler + + To limit the search for WebSocket handlers to a directory <scan_dir> + under <websock_handlers>, configure as follows: + + PythonOption mod_pywebsocket.handler_scan <scan_dir> + + <scan_dir> is useful for saving scan time when <websock_handlers> + contains many non-WebSocket handler files. + + If you want to allow handlers whose canonical path is not under the root + directory (i.e. a symbolic link is in the root directory but its target is + not), configure as follows: + + PythonOption mod_pywebsocket.allow_handlers_outside_root_dir On + + Example snippet of httpd.conf: + (mod_pywebsocket is in /websock_lib, WebSocket handlers are in + /websock_handlers, port is 80 for ws, 443 for wss.) + + <IfModule python_module> + PythonPath "sys.path+['/websock_lib']" + PythonOption mod_pywebsocket.handler_root /websock_handlers + PythonHeaderParserHandler mod_pywebsocket.headerparserhandler + </IfModule> + +2. Tune Apache parameters for serving WebSocket. Note that at least the + TimeOut directive (an Apache core feature) and the RequestReadTimeout + directive (from mod_reqtimeout) should be adjusted so that idle + connections are not killed after only a few seconds. + +3. Verify the installation. You can use example/console.html to poke the server. + + +Writing WebSocket handlers +========================== + +When a WebSocket request comes in, the resource name +specified in the handshake is treated as a file path under +<websock_handlers>, and the handler defined in +<websock_handlers>/<resource_name>_wsh.py is invoked. + +For example, if the resource name is /example/chat, the handler defined in +<websock_handlers>/example/chat_wsh.py is invoked. + +A WebSocket handler is composed of the following three functions: + + web_socket_do_extra_handshake(request) + web_socket_transfer_data(request) + web_socket_passive_closing_handshake(request) + +where: + request: mod_python request. + +web_socket_do_extra_handshake is called during the handshake after the +headers are successfully parsed and WebSocket properties (ws_location, +ws_origin, and ws_resource) are added to request. A handler +can reject the request by raising an exception. + +A request object has the following properties that you can use during the +extra handshake (web_socket_do_extra_handshake): +- ws_resource +- ws_origin +- ws_version +- ws_location (HyBi 00 only) +- ws_extensions (HyBi 06 and later) +- ws_deflate (HyBi 06 and later) +- ws_protocol +- ws_requested_protocols (HyBi 06 and later) + +The last two are a bit tricky. See the next subsection. + + +Subprotocol Negotiation +----------------------- + +For HyBi 06 and later, ws_protocol is always set to None when +web_socket_do_extra_handshake is called. If ws_requested_protocols is not +None, you must choose one subprotocol from this list and set it to +ws_protocol. + +For HyBi 00, when web_socket_do_extra_handshake is called, +ws_protocol is set to the value given by the client in the +Sec-WebSocket-Protocol header, or None if +no such header was found in the opening handshake request. Finish the extra +handshake with ws_protocol untouched to accept the requested subprotocol. +Then, the Sec-WebSocket-Protocol header will be sent to +the client in the response with the same value as requested. Raise an exception +in web_socket_do_extra_handshake to reject the requested subprotocol.
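+ +For illustration, here is a minimal handler sketch. It assumes HyBi 06 or +later and a hypothetical resource /echo (i.e. the file +<websock_handlers>/echo_wsh.py); it accepts the first requested subprotocol, +if any, and echoes a single message back: + + def web_socket_do_extra_handshake(request): + # Accept the first subprotocol offered by the client, if any. + # (A real handler should validate the requested list instead.) + if request.ws_requested_protocols: + request.ws_protocol = request.ws_requested_protocols[0] + + def web_socket_transfer_data(request): + # receive_message() returns None when the client initiates the + # closing handshake (HyBi 00 and later); see Data Transfer below. + message = request.ws_stream.receive_message() + if message is not None: + request.ws_stream.send_message(message) + +This is only a sketch, not part of the library; the data transfer API it +uses is described in the next section.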
+ + +Data Transfer +------------- + +web_socket_transfer_data is called after the handshake has completed +successfully. A handler can receive/send messages from/to the client +using request. The mod_pywebsocket.msgutil module provides utilities +for data transfer. + +You can receive a message with the following statement. + + message = request.ws_stream.receive_message() + +This call blocks until any complete text frame arrives, and the payload data +of the incoming frame will be stored in message. When you're using the IETF +HyBi 00 or later protocol, receive_message() will return None on receiving a +client-initiated closing handshake. When any error occurs, receive_message() +will raise an exception. + +You can send a message with the following statement. + + request.ws_stream.send_message(message) + + +Closing Connection +------------------ + +Executing the following statement, or simply returning from +web_socket_transfer_data, causes the connection to close. + + request.ws_stream.close_connection() + +close_connection will wait +for a closing handshake acknowledgement from the client and raises an +exception when it cannot receive a valid one. + +web_socket_passive_closing_handshake is called immediately after the server +receives an incoming closing frame from the client peer. You can specify the +close code and reason via its return values; they are sent as an outgoing +closing frame from the server. A request object has the following properties +that you can use in web_socket_passive_closing_handshake. +- ws_close_code +- ws_close_reason + + +Threading +--------- + +A WebSocket handler must be thread-safe if the server (Apache or +standalone.py) is configured to use threads. +""" + + +# vi:sts=4 sw=4 et tw=72 diff --git a/module/lib/mod_pywebsocket/_stream_base.py b/module/lib/mod_pywebsocket/_stream_base.py new file mode 100644 index 000000000..60fb33d2c --- /dev/null +++ b/module/lib/mod_pywebsocket/_stream_base.py @@ -0,0 +1,165 @@ +# Copyright 2011, Google Inc. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + +"""Base stream class.
+""" + + +# Note: request.connection.write/read are used in this module, even though +# mod_python document says that they should be used only in connection +# handlers. Unfortunately, we have no other options. For example, +# request.write/read are not suitable because they don't allow direct raw bytes +# writing/reading. + + +from mod_pywebsocket import util + + +# Exceptions + + +class ConnectionTerminatedException(Exception): + """This exception will be raised when a connection is terminated + unexpectedly. + """ + + pass + + +class InvalidFrameException(ConnectionTerminatedException): + """This exception will be raised when we received an invalid frame we + cannot parse. + """ + + pass + + +class BadOperationException(Exception): + """This exception will be raised when send_message() is called on + server-terminated connection or receive_message() is called on + client-terminated connection. + """ + + pass + + +class UnsupportedFrameException(Exception): + """This exception will be raised when we receive a frame with flag, opcode + we cannot handle. Handlers can just catch and ignore this exception and + call receive_message() again to continue processing the next frame. + """ + + pass + + +class InvalidUTF8Exception(Exception): + """This exception will be raised when we receive a text frame which + contains invalid UTF-8 strings. + """ + + pass + + +class StreamBase(object): + """Base stream class.""" + + def __init__(self, request): + """Construct an instance. + + Args: + request: mod_python request. + """ + + self._logger = util.get_class_logger(self) + + self._request = request + + def _read(self, length): + """Reads length bytes from connection. In case we catch any exception, + prepends remote address to the exception message and raise again. + + Raises: + ConnectionTerminatedException: when read returns empty string. + """ + + bytes = self._request.connection.read(length) + if not bytes: + raise ConnectionTerminatedException( + 'Receiving %d byte failed. Peer (%r) closed connection' % + (length, (self._request.connection.remote_addr,))) + return bytes + + def _write(self, bytes): + """Writes given bytes to connection. In case we catch any exception, + prepends remote address to the exception message and raise again. + """ + + try: + self._request.connection.write(bytes) + except Exception, e: + util.prepend_message_to_exception( + 'Failed to send message to %r: ' % + (self._request.connection.remote_addr,), + e) + raise + + def receive_bytes(self, length): + """Receives multiple bytes. Retries read when we couldn't receive the + specified amount. + + Raises: + ConnectionTerminatedException: when read returns empty string. + """ + + bytes = [] + while length > 0: + new_bytes = self._read(length) + bytes.append(new_bytes) + length -= len(new_bytes) + return ''.join(bytes) + + def _read_until(self, delim_char): + """Reads bytes until we encounter delim_char. The result will not + contain delim_char. + + Raises: + ConnectionTerminatedException: when read returns empty string. + """ + + bytes = [] + while True: + ch = self._read(1) + if ch == delim_char: + break + bytes.append(ch) + return ''.join(bytes) + + +# vi:sts=4 sw=4 et diff --git a/module/lib/mod_pywebsocket/_stream_hixie75.py b/module/lib/mod_pywebsocket/_stream_hixie75.py new file mode 100644 index 000000000..94cf5b31b --- /dev/null +++ b/module/lib/mod_pywebsocket/_stream_hixie75.py @@ -0,0 +1,229 @@ +# Copyright 2011, Google Inc. +# All rights reserved. 
+# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + +"""This file provides a class for parsing/building frames of the WebSocket +protocol version HyBi 00 and Hixie 75. + +Specification: +- HyBi 00 http://tools.ietf.org/html/draft-ietf-hybi-thewebsocketprotocol-00 +- Hixie 75 http://tools.ietf.org/html/draft-hixie-thewebsocketprotocol-75 +""" + + +from mod_pywebsocket import common +from mod_pywebsocket._stream_base import BadOperationException +from mod_pywebsocket._stream_base import ConnectionTerminatedException +from mod_pywebsocket._stream_base import InvalidFrameException +from mod_pywebsocket._stream_base import StreamBase +from mod_pywebsocket._stream_base import UnsupportedFrameException +from mod_pywebsocket import util + + +class StreamHixie75(StreamBase): + """A class for parsing/building frames of the WebSocket protocol version + HyBi 00 and Hixie 75. + """ + + def __init__(self, request, enable_closing_handshake=False): + """Construct an instance. + + Args: + request: mod_python request. + enable_closing_handshake: to let StreamHixie75 perform closing + handshake as specified in HyBi 00, set + this option to True. + """ + + StreamBase.__init__(self, request) + + self._logger = util.get_class_logger(self) + + self._enable_closing_handshake = enable_closing_handshake + + self._request.client_terminated = False + self._request.server_terminated = False + + def send_message(self, message, end=True, binary=False): + """Send message. + + Args: + message: unicode string to send. + binary: not used in hixie75. + + Raises: + BadOperationException: when called on a server-terminated + connection. 
+ """ + + if not end: + raise BadOperationException( + 'StreamHixie75 doesn\'t support send_message with end=False') + + if binary: + raise BadOperationException( + 'StreamHixie75 doesn\'t support send_message with binary=True') + + if self._request.server_terminated: + raise BadOperationException( + 'Requested send_message after sending out a closing handshake') + + self._write(''.join(['\x00', message.encode('utf-8'), '\xff'])) + + def _read_payload_length_hixie75(self): + """Reads a length header in a Hixie75 version frame with length. + + Raises: + ConnectionTerminatedException: when read returns empty string. + """ + + length = 0 + while True: + b_str = self._read(1) + b = ord(b_str) + length = length * 128 + (b & 0x7f) + if (b & 0x80) == 0: + break + return length + + def receive_message(self): + """Receive a WebSocket frame and return its payload an unicode string. + + Returns: + payload unicode string in a WebSocket frame. + + Raises: + ConnectionTerminatedException: when read returns empty + string. + BadOperationException: when called on a client-terminated + connection. + """ + + if self._request.client_terminated: + raise BadOperationException( + 'Requested receive_message after receiving a closing ' + 'handshake') + + while True: + # Read 1 byte. + # mp_conn.read will block if no bytes are available. + # Timeout is controlled by TimeOut directive of Apache. + frame_type_str = self.receive_bytes(1) + frame_type = ord(frame_type_str) + if (frame_type & 0x80) == 0x80: + # The payload length is specified in the frame. + # Read and discard. + length = self._read_payload_length_hixie75() + if length > 0: + _ = self.receive_bytes(length) + # 5.3 3. 12. if /type/ is 0xFF and /length/ is 0, then set the + # /client terminated/ flag and abort these steps. + if not self._enable_closing_handshake: + continue + + if frame_type == 0xFF and length == 0: + self._request.client_terminated = True + + if self._request.server_terminated: + self._logger.debug( + 'Received ack for server-initiated closing ' + 'handshake') + return None + + self._logger.debug( + 'Received client-initiated closing handshake') + + self._send_closing_handshake() + self._logger.debug( + 'Sent ack for client-initiated closing handshake') + return None + else: + # The payload is delimited with \xff. + bytes = self._read_until('\xff') + # The WebSocket protocol section 4.4 specifies that invalid + # characters must be replaced with U+fffd REPLACEMENT + # CHARACTER. + message = bytes.decode('utf-8', 'replace') + if frame_type == 0x00: + return message + # Discard data of other types. + + def _send_closing_handshake(self): + if not self._enable_closing_handshake: + raise BadOperationException( + 'Closing handshake is not supported in Hixie 75 protocol') + + self._request.server_terminated = True + + # 5.3 the server may decide to terminate the WebSocket connection by + # running through the following steps: + # 1. send a 0xFF byte and a 0x00 byte to the client to indicate the + # start of the closing handshake. + self._write('\xff\x00') + + def close_connection(self, unused_code='', unused_reason=''): + """Closes a WebSocket connection. + + Raises: + ConnectionTerminatedException: when closing handshake was + not successfull. 
+ """ + + if self._request.server_terminated: + self._logger.debug( + 'Requested close_connection but server is already terminated') + return + + if not self._enable_closing_handshake: + self._request.server_terminated = True + self._logger.debug('Connection closed') + return + + self._send_closing_handshake() + self._logger.debug('Sent server-initiated closing handshake') + + # TODO(ukai): 2. wait until the /client terminated/ flag has been set, + # or until a server-defined timeout expires. + # + # For now, we expect receiving closing handshake right after sending + # out closing handshake, and if we couldn't receive non-handshake + # frame, we take it as ConnectionTerminatedException. + message = self.receive_message() + if message is not None: + raise ConnectionTerminatedException( + 'Didn\'t receive valid ack for closing handshake') + # TODO: 3. close the WebSocket connection. + # note: mod_python Connection (mp_conn) doesn't have close method. + + def send_ping(self, body): + raise BadOperationException( + 'StreamHixie75 doesn\'t support send_ping') + + +# vi:sts=4 sw=4 et diff --git a/module/lib/mod_pywebsocket/_stream_hybi.py b/module/lib/mod_pywebsocket/_stream_hybi.py new file mode 100644 index 000000000..bd158fa6b --- /dev/null +++ b/module/lib/mod_pywebsocket/_stream_hybi.py @@ -0,0 +1,915 @@ +# Copyright 2012, Google Inc. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + +"""This file provides classes and helper functions for parsing/building frames +of the WebSocket protocol (RFC 6455). 
+ +Specification: +http://tools.ietf.org/html/rfc6455 +""" + + +from collections import deque +import logging +import os +import struct +import time + +from mod_pywebsocket import common +from mod_pywebsocket import util +from mod_pywebsocket._stream_base import BadOperationException +from mod_pywebsocket._stream_base import ConnectionTerminatedException +from mod_pywebsocket._stream_base import InvalidFrameException +from mod_pywebsocket._stream_base import InvalidUTF8Exception +from mod_pywebsocket._stream_base import StreamBase +from mod_pywebsocket._stream_base import UnsupportedFrameException + + +_NOOP_MASKER = util.NoopMasker() + + +class Frame(object): + + def __init__(self, fin=1, rsv1=0, rsv2=0, rsv3=0, + opcode=None, payload=''): + self.fin = fin + self.rsv1 = rsv1 + self.rsv2 = rsv2 + self.rsv3 = rsv3 + self.opcode = opcode + self.payload = payload + + +# Helper functions made public to be used for writing unittests for WebSocket +# clients. + + +def create_length_header(length, mask): + """Creates a length header. + + Args: + length: Frame length. Must be less than 2^63. + mask: Mask bit. Must be boolean. + + Raises: + ValueError: when bad data is given. + """ + + if mask: + mask_bit = 1 << 7 + else: + mask_bit = 0 + + if length < 0: + raise ValueError('length must be non negative integer') + elif length <= 125: + return chr(mask_bit | length) + elif length < (1 << 16): + return chr(mask_bit | 126) + struct.pack('!H', length) + elif length < (1 << 63): + return chr(mask_bit | 127) + struct.pack('!Q', length) + else: + raise ValueError('Payload is too big for one frame') + + +def create_header(opcode, payload_length, fin, rsv1, rsv2, rsv3, mask): + """Creates a frame header. + + Raises: + Exception: when bad data is given. + """ + + if opcode < 0 or 0xf < opcode: + raise ValueError('Opcode out of range') + + if payload_length < 0 or (1 << 63) <= payload_length: + raise ValueError('payload_length out of range') + + if (fin | rsv1 | rsv2 | rsv3) & ~1: + raise ValueError('FIN bit and Reserved bit parameter must be 0 or 1') + + header = '' + + first_byte = ((fin << 7) + | (rsv1 << 6) | (rsv2 << 5) | (rsv3 << 4) + | opcode) + header += chr(first_byte) + header += create_length_header(payload_length, mask) + + return header + + +def _build_frame(header, body, mask): + if not mask: + return header + body + + masking_nonce = os.urandom(4) + masker = util.RepeatedXorMasker(masking_nonce) + + return header + masking_nonce + masker.mask(body) + + +def _filter_and_format_frame_object(frame, mask, frame_filters): + for frame_filter in frame_filters: + frame_filter.filter(frame) + + header = create_header( + frame.opcode, len(frame.payload), frame.fin, + frame.rsv1, frame.rsv2, frame.rsv3, mask) + return _build_frame(header, frame.payload, mask) + + +def create_binary_frame( + message, opcode=common.OPCODE_BINARY, fin=1, mask=False, frame_filters=[]): + """Creates a simple binary frame with no extension, reserved bit.""" + + frame = Frame(fin=fin, opcode=opcode, payload=message) + return _filter_and_format_frame_object(frame, mask, frame_filters) + + +def create_text_frame( + message, opcode=common.OPCODE_TEXT, fin=1, mask=False, frame_filters=[]): + """Creates a simple text frame with no extension, reserved bit.""" + + encoded_message = message.encode('utf-8') + return create_binary_frame(encoded_message, opcode, fin, mask, + frame_filters) + + +def parse_frame(receive_bytes, logger=None, + ws_version=common.VERSION_HYBI_LATEST, + unmask_receive=True): + """Parses a frame. 
Returns a tuple containing each header field and + payload. + + Args: + receive_bytes: a function that reads frame data from a stream or + something similar. The function takes the length of the bytes to + be read. The function must raise ConnectionTerminatedException if + there is not enough data to be read. + logger: a logging object. + ws_version: the version of the WebSocket protocol. + unmask_receive: unmask received frames. When the mask bit of a + received frame does not match this configuration, + InvalidFrameException is raised. + + Raises: + ConnectionTerminatedException: when receive_bytes raises it. + InvalidFrameException: when the frame contains invalid data. + """ + + if not logger: + logger = logging.getLogger() + + logger.log(common.LOGLEVEL_FINE, 'Receive the first 2 octets of a frame') + + received = receive_bytes(2) + + first_byte = ord(received[0]) + fin = (first_byte >> 7) & 1 + rsv1 = (first_byte >> 6) & 1 + rsv2 = (first_byte >> 5) & 1 + rsv3 = (first_byte >> 4) & 1 + opcode = first_byte & 0xf + + second_byte = ord(received[1]) + mask = (second_byte >> 7) & 1 + payload_length = second_byte & 0x7f + + logger.log(common.LOGLEVEL_FINE, + 'FIN=%s, RSV1=%s, RSV2=%s, RSV3=%s, opcode=%s, ' + 'Mask=%s, Payload_length=%s', + fin, rsv1, rsv2, rsv3, opcode, mask, payload_length) + + if (mask == 1) != unmask_receive: + raise InvalidFrameException( + 'Mask bit on the received frame didn\'t match masking ' + 'configuration for received frames') + + # The HyBi and later specs disallow putting a value in 0x0-0xFFFF + # into the 8-octet extended payload length field (or 0x0-0xFD in + # 2-octet field). + valid_length_encoding = True + length_encoding_bytes = 1 + if payload_length == 127: + logger.log(common.LOGLEVEL_FINE, + 'Receive 8-octet extended payload length') + + extended_payload_length = receive_bytes(8) + payload_length = struct.unpack( + '!Q', extended_payload_length)[0] + if payload_length > 0x7FFFFFFFFFFFFFFF: + raise InvalidFrameException( + 'Extended payload length >= 2^63') + if ws_version >= 13 and payload_length < 0x10000: + valid_length_encoding = False + length_encoding_bytes = 8 + + logger.log(common.LOGLEVEL_FINE, + 'Decoded_payload_length=%s', payload_length) + elif payload_length == 126: + logger.log(common.LOGLEVEL_FINE, + 'Receive 2-octet extended payload length') + + extended_payload_length = receive_bytes(2) + payload_length = struct.unpack( + '!H', extended_payload_length)[0] + if ws_version >= 13 and payload_length < 126: + valid_length_encoding = False + length_encoding_bytes = 2 + + logger.log(common.LOGLEVEL_FINE, + 'Decoded_payload_length=%s', payload_length) + + if not valid_length_encoding: + logger.warning( + 'Payload length is not encoded using the minimal number of ' + 'bytes (%d is encoded using %d bytes)', + payload_length, + length_encoding_bytes) + + if mask == 1: + logger.log(common.LOGLEVEL_FINE, 'Receive mask') + + masking_nonce = receive_bytes(4) + masker = util.RepeatedXorMasker(masking_nonce) + + logger.log(common.LOGLEVEL_FINE, 'Mask=%r', masking_nonce) + else: + masker = _NOOP_MASKER + + logger.log(common.LOGLEVEL_FINE, 'Receive payload data') + if logger.isEnabledFor(common.LOGLEVEL_FINE): + receive_start = time.time() + + raw_payload_bytes = receive_bytes(payload_length) + + if logger.isEnabledFor(common.LOGLEVEL_FINE): + logger.log( + common.LOGLEVEL_FINE, + 'Done receiving payload data at %s MB/s', + payload_length / (time.time() - receive_start) / 1000 / 1000) + logger.log(common.LOGLEVEL_FINE, 'Unmask payload data') + + if logger.isEnabledFor(common.LOGLEVEL_FINE): + unmask_start =
time.time() + + bytes = masker.mask(raw_payload_bytes) + + if logger.isEnabledFor(common.LOGLEVEL_FINE): + logger.log( + common.LOGLEVEL_FINE, + 'Done unmasking payload data at %s MB/s', + payload_length / (time.time() - unmask_start) / 1000 / 1000) + + return opcode, bytes, fin, rsv1, rsv2, rsv3 + + +class FragmentedFrameBuilder(object): + """A stateful class to send a message as fragments.""" + + def __init__(self, mask, frame_filters=[], encode_utf8=True): + """Constructs an instance.""" + + self._mask = mask + self._frame_filters = frame_filters + # This is for skipping UTF-8 encoding when building text type frames + # from compressed data. + self._encode_utf8 = encode_utf8 + + self._started = False + + # Hold opcode of the first frame in messages to verify types of other + # frames in the message are all the same. + self._opcode = common.OPCODE_TEXT + + def build(self, payload_data, end, binary): + if binary: + frame_type = common.OPCODE_BINARY + else: + frame_type = common.OPCODE_TEXT + if self._started: + if self._opcode != frame_type: + raise ValueError('Message types are different in frames for ' + 'the same message') + opcode = common.OPCODE_CONTINUATION + else: + opcode = frame_type + self._opcode = frame_type + + if end: + self._started = False + fin = 1 + else: + self._started = True + fin = 0 + + if binary or not self._encode_utf8: + return create_binary_frame( + payload_data, opcode, fin, self._mask, self._frame_filters) + else: + return create_text_frame( + payload_data, opcode, fin, self._mask, self._frame_filters) + + +def _create_control_frame(opcode, body, mask, frame_filters): + frame = Frame(opcode=opcode, payload=body) + + for frame_filter in frame_filters: + frame_filter.filter(frame) + + if len(frame.payload) > 125: + raise BadOperationException( + 'Payload data size of control frames must be 125 bytes or less') + + header = create_header( + frame.opcode, len(frame.payload), frame.fin, + frame.rsv1, frame.rsv2, frame.rsv3, mask) + return _build_frame(header, frame.payload, mask) + + +def create_ping_frame(body, mask=False, frame_filters=[]): + return _create_control_frame(common.OPCODE_PING, body, mask, frame_filters) + + +def create_pong_frame(body, mask=False, frame_filters=[]): + return _create_control_frame(common.OPCODE_PONG, body, mask, frame_filters) + + +def create_close_frame(body, mask=False, frame_filters=[]): + return _create_control_frame( + common.OPCODE_CLOSE, body, mask, frame_filters) + + +def create_closing_handshake_body(code, reason): + body = '' + if code is not None: + if (code > common.STATUS_USER_PRIVATE_MAX or + code < common.STATUS_NORMAL_CLOSURE): + raise BadOperationException('Status code is out of range') + if (code == common.STATUS_NO_STATUS_RECEIVED or + code == common.STATUS_ABNORMAL_CLOSURE or + code == common.STATUS_TLS_HANDSHAKE): + raise BadOperationException('Status code is reserved pseudo ' + 'code') + encoded_reason = reason.encode('utf-8') + body = struct.pack('!H', code) + encoded_reason + return body + + +class StreamOptions(object): + """Holds option values to configure Stream objects.""" + + def __init__(self): + """Constructs StreamOptions.""" + + # Enables deflate-stream extension. + self.deflate_stream = False + + # Filters applied to frames. + self.outgoing_frame_filters = [] + self.incoming_frame_filters = [] + + # Filters applied to messages. Control frames are not affected by them. 
+ self.outgoing_message_filters = [] + self.incoming_message_filters = [] + + self.encode_text_message_to_utf8 = True + self.mask_send = False + self.unmask_receive = True + # RFC6455 disallows fragmented control frames, but mux extension + # relaxes the restriction. + self.allow_fragmented_control_frame = False + + +class Stream(StreamBase): + """A class for parsing/building frames of the WebSocket protocol + (RFC 6455). + """ + + def __init__(self, request, options): + """Constructs an instance. + + Args: + request: mod_python request. + """ + + StreamBase.__init__(self, request) + + self._logger = util.get_class_logger(self) + + self._options = options + + if self._options.deflate_stream: + self._logger.debug('Setup filter for deflate-stream') + self._request = util.DeflateRequest(self._request) + + self._request.client_terminated = False + self._request.server_terminated = False + + # Holds body of received fragments. + self._received_fragments = [] + # Holds the opcode of the first fragment. + self._original_opcode = None + + self._writer = FragmentedFrameBuilder( + self._options.mask_send, self._options.outgoing_frame_filters, + self._options.encode_text_message_to_utf8) + + self._ping_queue = deque() + + def _receive_frame(self): + """Receives a frame and return data in the frame as a tuple containing + each header field and payload separately. + + Raises: + ConnectionTerminatedException: when read returns empty + string. + InvalidFrameException: when the frame contains invalid data. + """ + + def _receive_bytes(length): + return self.receive_bytes(length) + + return parse_frame(receive_bytes=_receive_bytes, + logger=self._logger, + ws_version=self._request.ws_version, + unmask_receive=self._options.unmask_receive) + + def _receive_frame_as_frame_object(self): + opcode, bytes, fin, rsv1, rsv2, rsv3 = self._receive_frame() + + return Frame(fin=fin, rsv1=rsv1, rsv2=rsv2, rsv3=rsv3, + opcode=opcode, payload=bytes) + + def receive_filtered_frame(self): + """Receives a frame and applies frame filters and message filters. + The frame to be received must satisfy following conditions: + - The frame is not fragmented. + - The opcode of the frame is TEXT or BINARY. + + DO NOT USE this method except for testing purpose. + """ + + frame = self._receive_frame_as_frame_object() + if not frame.fin: + raise InvalidFrameException( + 'Segmented frames must not be received via ' + 'receive_filtered_frame()') + if (frame.opcode != common.OPCODE_TEXT and + frame.opcode != common.OPCODE_BINARY): + raise InvalidFrameException( + 'Control frames must not be received via ' + 'receive_filtered_frame()') + + for frame_filter in self._options.incoming_frame_filters: + frame_filter.filter(frame) + for message_filter in self._options.incoming_message_filters: + frame.payload = message_filter.filter(frame.payload) + return frame + + def send_message(self, message, end=True, binary=False): + """Send message. + + Args: + message: text in unicode or binary in str to send. + binary: send message as binary frame. + + Raises: + BadOperationException: when called on a server-terminated + connection or called with inconsistent message type or + binary parameter. 
+ """ + + if self._request.server_terminated: + raise BadOperationException( + 'Requested send_message after sending out a closing handshake') + + if binary and isinstance(message, unicode): + raise BadOperationException( + 'Message for binary frame must be instance of str') + + for message_filter in self._options.outgoing_message_filters: + message = message_filter.filter(message, end, binary) + + try: + # Set this to any positive integer to limit maximum size of data in + # payload data of each frame. + MAX_PAYLOAD_DATA_SIZE = -1 + + if MAX_PAYLOAD_DATA_SIZE <= 0: + self._write(self._writer.build(message, end, binary)) + return + + bytes_written = 0 + while True: + end_for_this_frame = end + bytes_to_write = len(message) - bytes_written + if (MAX_PAYLOAD_DATA_SIZE > 0 and + bytes_to_write > MAX_PAYLOAD_DATA_SIZE): + end_for_this_frame = False + bytes_to_write = MAX_PAYLOAD_DATA_SIZE + + frame = self._writer.build( + message[bytes_written:bytes_written + bytes_to_write], + end_for_this_frame, + binary) + self._write(frame) + + bytes_written += bytes_to_write + + # This if must be placed here (the end of while block) so that + # at least one frame is sent. + if len(message) <= bytes_written: + break + except ValueError, e: + raise BadOperationException(e) + + def _get_message_from_frame(self, frame): + """Gets a message from frame. If the message is composed of fragmented + frames and the frame is not the last fragmented frame, this method + returns None. The whole message will be returned when the last + fragmented frame is passed to this method. + + Raises: + InvalidFrameException: when the frame doesn't match defragmentation + context, or the frame contains invalid data. + """ + + if frame.opcode == common.OPCODE_CONTINUATION: + if not self._received_fragments: + if frame.fin: + raise InvalidFrameException( + 'Received a termination frame but fragmentation ' + 'not started') + else: + raise InvalidFrameException( + 'Received an intermediate frame but ' + 'fragmentation not started') + + if frame.fin: + # End of fragmentation frame + self._received_fragments.append(frame.payload) + message = ''.join(self._received_fragments) + self._received_fragments = [] + return message + else: + # Intermediate frame + self._received_fragments.append(frame.payload) + return None + else: + if self._received_fragments: + if frame.fin: + raise InvalidFrameException( + 'Received an unfragmented frame without ' + 'terminating existing fragmentation') + else: + raise InvalidFrameException( + 'New fragmentation started without terminating ' + 'existing fragmentation') + + if frame.fin: + # Unfragmented frame + + self._original_opcode = frame.opcode + return frame.payload + else: + # Start of fragmentation frame + + if (not self._options.allow_fragmented_control_frame and + common.is_control_opcode(frame.opcode)): + raise InvalidFrameException( + 'Control frames must not be fragmented') + + self._original_opcode = frame.opcode + self._received_fragments.append(frame.payload) + return None + + def _process_close_message(self, message): + """Processes close message. + + Args: + message: close message. + + Raises: + InvalidFrameException: when the message is invalid. + """ + + self._request.client_terminated = True + + # Status code is optional. We can have status reason only if we + # have status code. Status reason can be empty string. 
So, + # allowed cases are + # - no application data: no code, no reason + # - 2 octets of application data: has code but no reason + # - 3 or more octets of application data: both code and reason + if len(message) == 0: + self._logger.debug('Received close frame (empty body)') + self._request.ws_close_code = ( + common.STATUS_NO_STATUS_RECEIVED) + elif len(message) == 1: + raise InvalidFrameException( + 'If a close frame has a status code, the length of the ' + 'status code must be 2 octets') + elif len(message) >= 2: + self._request.ws_close_code = struct.unpack( + '!H', message[0:2])[0] + self._request.ws_close_reason = message[2:].decode( + 'utf-8', 'replace') + self._logger.debug( + 'Received close frame (code=%d, reason=%r)', + self._request.ws_close_code, + self._request.ws_close_reason) + + # Drain junk data after the close frame if necessary. + self._drain_received_data() + + if self._request.server_terminated: + self._logger.debug( + 'Received ack for server-initiated closing handshake') + return + + self._logger.debug( + 'Received client-initiated closing handshake') + + code = common.STATUS_NORMAL_CLOSURE + reason = '' + if hasattr(self._request, '_dispatcher'): + dispatcher = self._request._dispatcher + code, reason = dispatcher.passive_closing_handshake( + self._request) + if code is None and reason is not None and len(reason) > 0: + self._logger.warning( + 'Handler specified reason despite code being None') + reason = '' + if reason is None: + reason = '' + self._send_closing_handshake(code, reason) + self._logger.debug( + 'Sent ack for client-initiated closing handshake ' + '(code=%r, reason=%r)', code, reason) + + def _process_ping_message(self, message): + """Processes ping message. + + Args: + message: ping message. + """ + + try: + handler = self._request.on_ping_handler + if handler: + handler(self._request, message) + return + except AttributeError, e: + pass + self._send_pong(message) + + def _process_pong_message(self, message): + """Processes pong message. + + Args: + message: pong message. + """ + + # TODO(tyoshino): Add ping timeout handling. + + inflight_pings = deque() + + while True: + try: + expected_body = self._ping_queue.popleft() + if expected_body == message: + # inflight_pings contains pings ignored by the + # other peer. Just forget them. + self._logger.debug( + 'Ping %r is acked (%d pings were ignored)', + expected_body, len(inflight_pings)) + break + else: + inflight_pings.append(expected_body) + except IndexError, e: + # The received pong was an unsolicited pong. Keep the + # ping queue as is. + self._ping_queue = inflight_pings + self._logger.debug('Received an unsolicited pong') + break + + try: + handler = self._request.on_pong_handler + if handler: + handler(self._request, message) + except AttributeError, e: + pass + + def receive_message(self): + """Receive a WebSocket frame and return its payload as text in + unicode or binary in str. + + Returns: + payload data of the frame + - as a unicode instance if a text frame was received + - as a str instance if a binary frame was received + or None iff a closing handshake was received. + Raises: + BadOperationException: when called on a client-terminated + connection. + ConnectionTerminatedException: when read returns empty + string. + InvalidFrameException: when the frame contains invalid + data. + UnsupportedFrameException: when the received frame has a + flag or opcode we cannot handle. You can ignore this + exception and continue receiving the next frame.
+ """ + + if self._request.client_terminated: + raise BadOperationException( + 'Requested receive_message after receiving a closing ' + 'handshake') + + while True: + # mp_conn.read will block if no bytes are available. + # Timeout is controlled by TimeOut directive of Apache. + + frame = self._receive_frame_as_frame_object() + + # Check the constraint on the payload size for control frames + # before extension processes the frame. + # See also http://tools.ietf.org/html/rfc6455#section-5.5 + if (common.is_control_opcode(frame.opcode) and + len(frame.payload) > 125): + raise InvalidFrameException( + 'Payload data size of control frames must be 125 bytes or ' + 'less') + + for frame_filter in self._options.incoming_frame_filters: + frame_filter.filter(frame) + + if frame.rsv1 or frame.rsv2 or frame.rsv3: + raise UnsupportedFrameException( + 'Unsupported flag is set (rsv = %d%d%d)' % + (frame.rsv1, frame.rsv2, frame.rsv3)) + + message = self._get_message_from_frame(frame) + if message is None: + continue + + for message_filter in self._options.incoming_message_filters: + message = message_filter.filter(message) + + if self._original_opcode == common.OPCODE_TEXT: + # The WebSocket protocol section 4.4 specifies that invalid + # characters must be replaced with U+fffd REPLACEMENT + # CHARACTER. + try: + return message.decode('utf-8') + except UnicodeDecodeError, e: + raise InvalidUTF8Exception(e) + elif self._original_opcode == common.OPCODE_BINARY: + return message + elif self._original_opcode == common.OPCODE_CLOSE: + self._process_close_message(message) + return None + elif self._original_opcode == common.OPCODE_PING: + self._process_ping_message(message) + elif self._original_opcode == common.OPCODE_PONG: + self._process_pong_message(message) + else: + raise UnsupportedFrameException( + 'Opcode %d is not supported' % self._original_opcode) + + def _send_closing_handshake(self, code, reason): + body = create_closing_handshake_body(code, reason) + frame = create_close_frame( + body, mask=self._options.mask_send, + frame_filters=self._options.outgoing_frame_filters) + + self._request.server_terminated = True + + self._write(frame) + + def close_connection(self, code=common.STATUS_NORMAL_CLOSURE, reason=''): + """Closes a WebSocket connection. + + Args: + code: Status code for close frame. If code is None, a close + frame with empty body will be sent. + reason: string representing close reason. + Raises: + BadOperationException: when reason is specified with code None + or reason is not an instance of both str and unicode. + """ + + if self._request.server_terminated: + self._logger.debug( + 'Requested close_connection but server is already terminated') + return + + if code is None: + if reason is not None and len(reason) > 0: + raise BadOperationException( + 'close reason must not be specified if code is None') + reason = '' + else: + if not isinstance(reason, str) and not isinstance(reason, unicode): + raise BadOperationException( + 'close reason must be an instance of str or unicode') + + self._send_closing_handshake(code, reason) + self._logger.debug( + 'Sent server-initiated closing handshake (code=%r, reason=%r)', + code, reason) + + if (code == common.STATUS_GOING_AWAY or + code == common.STATUS_PROTOCOL_ERROR): + # It doesn't make sense to wait for a close frame if the reason is + # protocol error or that the server is going away. For some of + # other reasons, it might not make sense to wait for a close frame, + # but it's not clear, yet. + return + + # TODO(ukai): 2. 
wait until the /client terminated/ flag has been set, + # or until a server-defined timeout expires. + # + # For now, we expect receiving closing handshake right after sending + # out closing handshake. + message = self.receive_message() + if message is not None: + raise ConnectionTerminatedException( + 'Didn\'t receive valid ack for closing handshake') + # TODO: 3. close the WebSocket connection. + # note: mod_python Connection (mp_conn) doesn't have close method. + + def send_ping(self, body=''): + frame = create_ping_frame( + body, + self._options.mask_send, + self._options.outgoing_frame_filters) + self._write(frame) + + self._ping_queue.append(body) + + def _send_pong(self, body): + frame = create_pong_frame( + body, + self._options.mask_send, + self._options.outgoing_frame_filters) + self._write(frame) + + def get_last_received_opcode(self): + """Returns the opcode of the WebSocket message which the last received + frame belongs to. The return value is valid iff immediately after + receive_message call. + """ + + return self._original_opcode + + def _drain_received_data(self): + """Drains unread data in the receive buffer to avoid sending out TCP + RST packet. This is because when deflate-stream is enabled, some + DEFLATE block for flushing data may follow a close frame. If any data + remains in the receive buffer of a socket when the socket is closed, + it sends out TCP RST packet to the other peer. + + Since mod_python's mp_conn object doesn't support non-blocking read, + we perform this only when pywebsocket is running in standalone mode. + """ + + # If self._options.deflate_stream is true, self._request is + # DeflateRequest, so we can get wrapped request object by + # self._request._request. + # + # Only _StandaloneRequest has _drain_received_data method. + if (self._options.deflate_stream and + ('_drain_received_data' in dir(self._request._request))): + self._request._request._drain_received_data() + + +# vi:sts=4 sw=4 et diff --git a/module/lib/mod_pywebsocket/common.py b/module/lib/mod_pywebsocket/common.py new file mode 100644 index 000000000..2388379c0 --- /dev/null +++ b/module/lib/mod_pywebsocket/common.py @@ -0,0 +1,307 @@ +# Copyright 2012, Google Inc. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + +"""This file must not depend on any module specific to the WebSocket protocol. +""" + + +from mod_pywebsocket import http_header_util + + +# Additional log level definitions. +LOGLEVEL_FINE = 9 + +# Constants indicating WebSocket protocol version. +VERSION_HIXIE75 = -1 +VERSION_HYBI00 = 0 +VERSION_HYBI01 = 1 +VERSION_HYBI02 = 2 +VERSION_HYBI03 = 2 +VERSION_HYBI04 = 4 +VERSION_HYBI05 = 5 +VERSION_HYBI06 = 6 +VERSION_HYBI07 = 7 +VERSION_HYBI08 = 8 +VERSION_HYBI09 = 8 +VERSION_HYBI10 = 8 +VERSION_HYBI11 = 8 +VERSION_HYBI12 = 8 +VERSION_HYBI13 = 13 +VERSION_HYBI14 = 13 +VERSION_HYBI15 = 13 +VERSION_HYBI16 = 13 +VERSION_HYBI17 = 13 + +# Constants indicating WebSocket protocol latest version. +VERSION_HYBI_LATEST = VERSION_HYBI13 + +# Port numbers +DEFAULT_WEB_SOCKET_PORT = 80 +DEFAULT_WEB_SOCKET_SECURE_PORT = 443 + +# Schemes +WEB_SOCKET_SCHEME = 'ws' +WEB_SOCKET_SECURE_SCHEME = 'wss' + +# Frame opcodes defined in the spec. +OPCODE_CONTINUATION = 0x0 +OPCODE_TEXT = 0x1 +OPCODE_BINARY = 0x2 +OPCODE_CLOSE = 0x8 +OPCODE_PING = 0x9 +OPCODE_PONG = 0xa + +# UUIDs used by HyBi 04 and later opening handshake and frame masking. +WEBSOCKET_ACCEPT_UUID = '258EAFA5-E914-47DA-95CA-C5AB0DC85B11' + +# Opening handshake header names and expected values. +UPGRADE_HEADER = 'Upgrade' +WEBSOCKET_UPGRADE_TYPE = 'websocket' +WEBSOCKET_UPGRADE_TYPE_HIXIE75 = 'WebSocket' +CONNECTION_HEADER = 'Connection' +UPGRADE_CONNECTION_TYPE = 'Upgrade' +HOST_HEADER = 'Host' +ORIGIN_HEADER = 'Origin' +SEC_WEBSOCKET_ORIGIN_HEADER = 'Sec-WebSocket-Origin' +SEC_WEBSOCKET_KEY_HEADER = 'Sec-WebSocket-Key' +SEC_WEBSOCKET_ACCEPT_HEADER = 'Sec-WebSocket-Accept' +SEC_WEBSOCKET_VERSION_HEADER = 'Sec-WebSocket-Version' +SEC_WEBSOCKET_PROTOCOL_HEADER = 'Sec-WebSocket-Protocol' +SEC_WEBSOCKET_EXTENSIONS_HEADER = 'Sec-WebSocket-Extensions' +SEC_WEBSOCKET_DRAFT_HEADER = 'Sec-WebSocket-Draft' +SEC_WEBSOCKET_KEY1_HEADER = 'Sec-WebSocket-Key1' +SEC_WEBSOCKET_KEY2_HEADER = 'Sec-WebSocket-Key2' +SEC_WEBSOCKET_LOCATION_HEADER = 'Sec-WebSocket-Location' + +# Extensions +DEFLATE_STREAM_EXTENSION = 'deflate-stream' +DEFLATE_FRAME_EXTENSION = 'deflate-frame' +PERFRAME_COMPRESSION_EXTENSION = 'perframe-compress' +PERMESSAGE_COMPRESSION_EXTENSION = 'permessage-compress' +X_WEBKIT_DEFLATE_FRAME_EXTENSION = 'x-webkit-deflate-frame' +X_WEBKIT_PERMESSAGE_COMPRESSION_EXTENSION = 'x-webkit-permessage-compress' +MUX_EXTENSION = 'mux_DO_NOT_USE' + +# Status codes +# The codes STATUS_NO_STATUS_RECEIVED, STATUS_ABNORMAL_CLOSURE, and +# STATUS_TLS_HANDSHAKE are pseudo codes that indicate specific error cases. +# They must not be used as codes in actual closing frames. +# Application level errors must use codes in the range +# STATUS_USER_REGISTERED_BASE to STATUS_USER_PRIVATE_MAX. The codes in the +# range STATUS_USER_REGISTERED_BASE to STATUS_USER_REGISTERED_MAX are managed +# by IANA. Usually an application should define its protocol-level errors in +# the range STATUS_USER_PRIVATE_BASE to STATUS_USER_PRIVATE_MAX.
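+# For example, a hypothetical application-defined error such as 'user +# quota exceeded' would be signalled with a code chosen from the private +# range, e.g. 4000 (STATUS_USER_PRIVATE_BASE).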
+STATUS_NORMAL_CLOSURE = 1000 +STATUS_GOING_AWAY = 1001 +STATUS_PROTOCOL_ERROR = 1002 +STATUS_UNSUPPORTED_DATA = 1003 +STATUS_NO_STATUS_RECEIVED = 1005 +STATUS_ABNORMAL_CLOSURE = 1006 +STATUS_INVALID_FRAME_PAYLOAD_DATA = 1007 +STATUS_POLICY_VIOLATION = 1008 +STATUS_MESSAGE_TOO_BIG = 1009 +STATUS_MANDATORY_EXTENSION = 1010 +STATUS_INTERNAL_ENDPOINT_ERROR = 1011 +STATUS_TLS_HANDSHAKE = 1015 +STATUS_USER_REGISTERED_BASE = 3000 +STATUS_USER_REGISTERED_MAX = 3999 +STATUS_USER_PRIVATE_BASE = 4000 +STATUS_USER_PRIVATE_MAX = 4999 +# Following definitions are aliases to keep compatibility. Applications must +# not use these obsoleted definitions anymore. +STATUS_NORMAL = STATUS_NORMAL_CLOSURE +STATUS_UNSUPPORTED = STATUS_UNSUPPORTED_DATA +STATUS_CODE_NOT_AVAILABLE = STATUS_NO_STATUS_RECEIVED +STATUS_ABNORMAL_CLOSE = STATUS_ABNORMAL_CLOSURE +STATUS_INVALID_FRAME_PAYLOAD = STATUS_INVALID_FRAME_PAYLOAD_DATA +STATUS_MANDATORY_EXT = STATUS_MANDATORY_EXTENSION + +# HTTP status codes +HTTP_STATUS_BAD_REQUEST = 400 +HTTP_STATUS_FORBIDDEN = 403 +HTTP_STATUS_NOT_FOUND = 404 + + +def is_control_opcode(opcode): + return (opcode >> 3) == 1 + + +class ExtensionParameter(object): + """Holds information about an extension which is exchanged on extension + negotiation in opening handshake. + """ + + def __init__(self, name): + self._name = name + # TODO(tyoshino): Change the data structure to more efficient one such + # as dict when the spec changes to say like + # - Parameter names must be unique + # - The order of parameters is not significant + self._parameters = [] + + def name(self): + return self._name + + def add_parameter(self, name, value): + self._parameters.append((name, value)) + + def get_parameters(self): + return self._parameters + + def get_parameter_names(self): + return [name for name, unused_value in self._parameters] + + def has_parameter(self, name): + for param_name, param_value in self._parameters: + if param_name == name: + return True + return False + + def get_parameter_value(self, name): + for param_name, param_value in self._parameters: + if param_name == name: + return param_value + + +class ExtensionParsingException(Exception): + def __init__(self, name): + super(ExtensionParsingException, self).__init__(name) + + +def _parse_extension_param(state, definition, allow_quoted_string): + param_name = http_header_util.consume_token(state) + + if param_name is None: + raise ExtensionParsingException('No valid parameter name found') + + http_header_util.consume_lwses(state) + + if not http_header_util.consume_string(state, '='): + definition.add_parameter(param_name, None) + return + + http_header_util.consume_lwses(state) + + if allow_quoted_string: + # TODO(toyoshim): Add code to validate that parsed param_value is token + param_value = http_header_util.consume_token_or_quoted_string(state) + else: + param_value = http_header_util.consume_token(state) + if param_value is None: + raise ExtensionParsingException( + 'No valid parameter value found on the right-hand side of ' + 'parameter %r' % param_name) + + definition.add_parameter(param_name, param_value) + + +def _parse_extension(state, allow_quoted_string): + extension_token = http_header_util.consume_token(state) + if extension_token is None: + return None + + extension = ExtensionParameter(extension_token) + + while True: + http_header_util.consume_lwses(state) + + if not http_header_util.consume_string(state, ';'): + break + + http_header_util.consume_lwses(state) + + try: + _parse_extension_param(state, extension, allow_quoted_string) 
+ except ExtensionParsingException, e: + raise ExtensionParsingException( + 'Failed to parse parameter for %r (%r)' % + (extension_token, e)) + + return extension + + +def parse_extensions(data, allow_quoted_string=False): + """Parses Sec-WebSocket-Extensions header value returns a list of + ExtensionParameter objects. + + Leading LWSes must be trimmed. + """ + + state = http_header_util.ParsingState(data) + + extension_list = [] + while True: + extension = _parse_extension(state, allow_quoted_string) + if extension is not None: + extension_list.append(extension) + + http_header_util.consume_lwses(state) + + if http_header_util.peek(state) is None: + break + + if not http_header_util.consume_string(state, ','): + raise ExtensionParsingException( + 'Failed to parse Sec-WebSocket-Extensions header: ' + 'Expected a comma but found %r' % + http_header_util.peek(state)) + + http_header_util.consume_lwses(state) + + if len(extension_list) == 0: + raise ExtensionParsingException( + 'No valid extension entry found') + + return extension_list + + +def format_extension(extension): + """Formats an ExtensionParameter object.""" + + formatted_params = [extension.name()] + for param_name, param_value in extension.get_parameters(): + if param_value is None: + formatted_params.append(param_name) + else: + quoted_value = http_header_util.quote_if_necessary(param_value) + formatted_params.append('%s=%s' % (param_name, quoted_value)) + return '; '.join(formatted_params) + + +def format_extensions(extension_list): + """Formats a list of ExtensionParameter objects.""" + + formatted_extension_list = [] + for extension in extension_list: + formatted_extension_list.append(format_extension(extension)) + return ', '.join(formatted_extension_list) + + +# vi:sts=4 sw=4 et diff --git a/module/lib/mod_pywebsocket/dispatch.py b/module/lib/mod_pywebsocket/dispatch.py new file mode 100644 index 000000000..25905f180 --- /dev/null +++ b/module/lib/mod_pywebsocket/dispatch.py @@ -0,0 +1,387 @@ +# Copyright 2012, Google Inc. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
diff --git a/module/lib/mod_pywebsocket/dispatch.py b/module/lib/mod_pywebsocket/dispatch.py
new file mode 100644
index 000000000..25905f180
--- /dev/null
+++ b/module/lib/mod_pywebsocket/dispatch.py
@@ -0,0 +1,387 @@
+# Copyright 2012, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+"""Dispatch WebSocket request.
+"""
+
+
+import logging
+import os
+import re
+
+from mod_pywebsocket import common
+from mod_pywebsocket import handshake
+from mod_pywebsocket import msgutil
+from mod_pywebsocket import mux
+from mod_pywebsocket import stream
+from mod_pywebsocket import util
+
+
+_SOURCE_PATH_PATTERN = re.compile(r'(?i)_wsh\.py$')
+_SOURCE_SUFFIX = '_wsh.py'
+_DO_EXTRA_HANDSHAKE_HANDLER_NAME = 'web_socket_do_extra_handshake'
+_TRANSFER_DATA_HANDLER_NAME = 'web_socket_transfer_data'
+_PASSIVE_CLOSING_HANDSHAKE_HANDLER_NAME = (
+    'web_socket_passive_closing_handshake')
+
+
+class DispatchException(Exception):
+    """Exception in dispatching WebSocket request."""
+
+    def __init__(self, name, status=common.HTTP_STATUS_NOT_FOUND):
+        super(DispatchException, self).__init__(name)
+        self.status = status
+
+
+def _default_passive_closing_handshake_handler(request):
+    """Default web_socket_passive_closing_handshake handler."""
+
+    return common.STATUS_NORMAL_CLOSURE, ''
+
+
+def _normalize_path(path):
+    """Normalize path.
+
+    Args:
+        path: the path to normalize.
+
+    Path is converted to the absolute path.
+    The input path can use either '\\' or '/' as the separator.
+    The normalized path always uses '/' regardless of the platform.
+    """
+
+    path = path.replace('\\', os.path.sep)
+    path = os.path.realpath(path)
+    path = path.replace('\\', '/')
+    return path
+
+
+def _create_path_to_resource_converter(base_dir):
+    """Returns a function that converts the path of a WebSocket handler
+    source file to a resource string by removing the path to the base
+    directory from its head, removing _SOURCE_SUFFIX from its tail, and
+    replacing path separators in it with '/'.
+
+    Args:
+        base_dir: the path to the base directory.
+    """
+
+    base_dir = _normalize_path(base_dir)
+
+    base_len = len(base_dir)
+    suffix_len = len(_SOURCE_SUFFIX)
+
+    def converter(path):
+        if not path.endswith(_SOURCE_SUFFIX):
+            return None
+        # _normalize_path must not be used because resolving symlinks
+        # breaks the following path check.
+        path = path.replace('\\', '/')
+        if not path.startswith(base_dir):
+            return None
+        return path[base_len:-suffix_len]
+
+    return converter
+
+
+def _enumerate_handler_file_paths(directory):
+    """Returns a generator that enumerates WebSocket handler source file
+    names in the given directory.
+    """
+
+    for root, unused_dirs, files in os.walk(directory):
+        for base in files:
+            path = os.path.join(root, base)
+            if _SOURCE_PATH_PATTERN.search(path):
+                yield path
+
+
+class _HandlerSuite(object):
+    """A handler suite holder class."""
+
+    def __init__(self, do_extra_handshake, transfer_data,
+                 passive_closing_handshake):
+        self.do_extra_handshake = do_extra_handshake
+        self.transfer_data = transfer_data
+        self.passive_closing_handshake = passive_closing_handshake
+
+
+def _source_handler_file(handler_definition):
+    """Source a handler definition string.
+
+    Args:
+        handler_definition: a string containing Python statements that
+            define handler functions.
+    """
+
+    global_dic = {}
+    try:
+        exec handler_definition in global_dic
+    except Exception:
+        raise DispatchException('Error in sourcing handler: ' +
+                                util.get_stack_trace())
+    passive_closing_handshake_handler = None
+    try:
+        passive_closing_handshake_handler = _extract_handler(
+            global_dic, _PASSIVE_CLOSING_HANDSHAKE_HANDLER_NAME)
+    except Exception:
+        passive_closing_handshake_handler = (
+            _default_passive_closing_handshake_handler)
+    return _HandlerSuite(
+        _extract_handler(global_dic, _DO_EXTRA_HANDSHAKE_HANDLER_NAME),
+        _extract_handler(global_dic, _TRANSFER_DATA_HANDLER_NAME),
+        passive_closing_handshake_handler)
+
+
+def _extract_handler(dic, name):
+    """Extracts a callable with the specified name from the given
+    dictionary dic.
+    """
+
+    if name not in dic:
+        raise DispatchException('%s is not defined.' % name)
+    handler = dic[name]
+    if not callable(handler):
+        raise DispatchException('%s is not callable.' % name)
+    return handler
+
+
+class Dispatcher(object):
+    """Dispatches WebSocket requests.
+
+    This class maintains a map from resource name to handlers.
+    """
+
+    def __init__(
+            self, root_dir, scan_dir=None,
+            allow_handlers_outside_root_dir=True):
+        """Construct an instance.
+
+        Args:
+            root_dir: The directory where handler definition files are
+                      placed.
+            scan_dir: The directory where handler definition files are
+                      searched. scan_dir must be a directory under root_dir,
+                      including root_dir itself. If scan_dir is None,
+                      root_dir is used as scan_dir. scan_dir can be useful
+                      in saving scan time when root_dir contains many
+                      subdirectories.
+            allow_handlers_outside_root_dir: Scans handler files even if
+                      their canonical path is not under root_dir.
+        """
+
+        self._logger = util.get_class_logger(self)
+
+        self._handler_suite_map = {}
+        self._source_warnings = []
+        if scan_dir is None:
+            scan_dir = root_dir
+        if not os.path.realpath(scan_dir).startswith(
+                os.path.realpath(root_dir)):
+            raise DispatchException('scan_dir:%s must be a directory under '
+                                    'root_dir:%s.' % (scan_dir, root_dir))
+        self._source_handler_files_in_dir(
+            root_dir, scan_dir, allow_handlers_outside_root_dir)
+
+    def add_resource_path_alias(self,
+                                alias_resource_path, existing_resource_path):
+        """Add resource path alias.
+
+        Once added, requests to alias_resource_path will be handled by the
+        handler registered for existing_resource_path.
+
+        Args:
+            alias_resource_path: alias resource path
+            existing_resource_path: existing resource path
+        """
+        try:
+            handler_suite = self._handler_suite_map[existing_resource_path]
+            self._handler_suite_map[alias_resource_path] = handler_suite
+        except KeyError:
+            raise DispatchException('No handler for: %r' %
+                                    existing_resource_path)
+
+    def source_warnings(self):
+        """Return warnings in sourcing handlers."""
+
+        return self._source_warnings
+
+    def do_extra_handshake(self, request):
+        """Do extra checking in WebSocket handshake.
+
+        Select a handler based on request.uri and call its
+        web_socket_do_extra_handshake function.
+
+        Args:
+            request: mod_python request.
+
+        Raises:
+            DispatchException: when no handler is found
+            AbortedByUserException: when the user handler aborts the
+                connection
+            HandshakeException: when the opening handshake fails
+        """
+
+        handler_suite = self.get_handler_suite(request.ws_resource)
+        if handler_suite is None:
+            raise DispatchException('No handler for: %r' % request.ws_resource)
+        do_extra_handshake_ = handler_suite.do_extra_handshake
+        try:
+            do_extra_handshake_(request)
+        except handshake.AbortedByUserException, e:
+            raise
+        except Exception, e:
+            util.prepend_message_to_exception(
+                '%s raised exception for %s: ' % (
+                    _DO_EXTRA_HANDSHAKE_HANDLER_NAME,
+                    request.ws_resource),
+                e)
+            raise handshake.HandshakeException(e, common.HTTP_STATUS_FORBIDDEN)
+
+    def transfer_data(self, request):
+        """Let a handler transfer data with a WebSocket client.
+
+        Select a handler based on request.ws_resource and call its
+        web_socket_transfer_data function.
+
+        Args:
+            request: mod_python request.
+
+        Raises:
+            DispatchException: when no handler is found
+            AbortedByUserException: when the user handler aborts the
+                connection
+        """
+
+        # TODO(tyoshino): Terminate underlying TCP connection if possible.
+        try:
+            if mux.use_mux(request):
+                mux.start(request, self)
+            else:
+                handler_suite = self.get_handler_suite(request.ws_resource)
+                if handler_suite is None:
+                    raise DispatchException('No handler for: %r' %
+                                            request.ws_resource)
+                transfer_data_ = handler_suite.transfer_data
+                transfer_data_(request)
+
+                if not request.server_terminated:
+                    request.ws_stream.close_connection()
+        # Catch non-critical exceptions the handler didn't handle.
+        except handshake.AbortedByUserException, e:
+            self._logger.debug('%s', e)
+            raise
+        except msgutil.BadOperationException, e:
+            self._logger.debug('%s', e)
+            request.ws_stream.close_connection(common.STATUS_ABNORMAL_CLOSURE)
+        except msgutil.InvalidFrameException, e:
+            # InvalidFrameException must be caught before
+            # ConnectionTerminatedException that catches
+            # InvalidFrameException.
+            self._logger.debug('%s', e)
+            request.ws_stream.close_connection(common.STATUS_PROTOCOL_ERROR)
+        except msgutil.UnsupportedFrameException, e:
+            self._logger.debug('%s', e)
+            request.ws_stream.close_connection(common.STATUS_UNSUPPORTED_DATA)
+        except stream.InvalidUTF8Exception, e:
+            self._logger.debug('%s', e)
+            request.ws_stream.close_connection(
+                common.STATUS_INVALID_FRAME_PAYLOAD_DATA)
+        except msgutil.ConnectionTerminatedException, e:
+            self._logger.debug('%s', e)
+        except Exception, e:
+            util.prepend_message_to_exception(
+                '%s raised exception for %s: ' % (
+                    _TRANSFER_DATA_HANDLER_NAME, request.ws_resource),
+                e)
+            raise
+
+    def passive_closing_handshake(self, request):
+        """Prepare code and reason for responding to a client-initiated
+        closing handshake.
+        """
+
+        handler_suite = self.get_handler_suite(request.ws_resource)
+        if handler_suite is None:
+            return _default_passive_closing_handshake_handler(request)
+        return handler_suite.passive_closing_handshake(request)
+
+    def get_handler_suite(self, resource):
+        """Retrieves two handlers (one for extra handshake processing, and
+        one for data transfer) for the given request as a _HandlerSuite
+        object.
+        """
+
+        fragment = None
+        if '#' in resource:
+            resource, fragment = resource.split('#', 1)
+        if '?' in resource:
+            resource = resource.split('?', 1)[0]
+        handler_suite = self._handler_suite_map.get(resource)
+        if handler_suite and fragment:
+            raise DispatchException('Fragment identifiers MUST NOT be used on '
+                                    'WebSocket URIs',
+                                    common.HTTP_STATUS_BAD_REQUEST)
+        return handler_suite
+
+    def _source_handler_files_in_dir(
+            self, root_dir, scan_dir, allow_handlers_outside_root_dir):
+        """Source all the handler source files in the scan_dir directory.
+
+        The resource path is determined relative to root_dir.
+        """
+
+        # We build a map from resource to handler code assuming that there's
+        # only one path from root_dir to scan_dir and it can be obtained by
+        # comparing their realpaths.
+
+        # Here we cannot use abspath. See
+        # https://bugs.webkit.org/show_bug.cgi?id=31603
+
+        convert = _create_path_to_resource_converter(root_dir)
+        scan_realpath = os.path.realpath(scan_dir)
+        root_realpath = os.path.realpath(root_dir)
+        for path in _enumerate_handler_file_paths(scan_realpath):
+            if (not allow_handlers_outside_root_dir and
+                (not os.path.realpath(path).startswith(root_realpath))):
+                self._logger.debug(
+                    'Canonical path of %s is not under root directory' %
+                    path)
+                continue
+            try:
+                handler_suite = _source_handler_file(open(path).read())
+            except DispatchException, e:
+                self._source_warnings.append('%s: %s' % (path, e))
+                continue
+            resource = convert(path)
+            if resource is None:
+                self._logger.debug(
+                    'Path to resource conversion on %s failed' % path)
+            else:
+                self._handler_suite_map[resource] = handler_suite
+
+
+# vi:sts=4 sw=4 et
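The handler convention enforced above is easiest to see in a concrete file.
A minimal sketch (the path <root_dir>/echo_wsh.py is hypothetical; the
converter above would map it to the resource '/echo', and msgutil is the
message helper module dispatch.py already imports):

    from mod_pywebsocket import msgutil

    def web_socket_do_extra_handshake(request):
        # Accept the handshake as-is; raising
        # handshake.AbortedByUserException here would abandon it.
        pass

    def web_socket_transfer_data(request):
        # Echo messages until the client terminates the connection.
        while True:
            message = msgutil.receive_message(request)
            msgutil.send_message(request, message)

    def web_socket_passive_closing_handshake(request):
        # Optional; when omitted, the Dispatcher falls back to
        # _default_passive_closing_handshake_handler.
        return 1000, ''  # common.STATUS_NORMAL_CLOSURE

Constructing dispatch.Dispatcher('/path/to/handlers') then sources every
*_wsh.py file under that tree; sourcing problems show up in
source_warnings() rather than aborting the scan.
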
diff --git a/module/lib/mod_pywebsocket/extensions.py b/module/lib/mod_pywebsocket/extensions.py
new file mode 100644
index 000000000..03dbf9ee1
--- /dev/null
+++ b/module/lib/mod_pywebsocket/extensions.py
@@ -0,0 +1,727 @@
+# Copyright 2012, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+from mod_pywebsocket import common
+from mod_pywebsocket import util
+from mod_pywebsocket.http_header_util import quote_if_necessary
+
+
+_available_processors = {}
+
+
+class ExtensionProcessorInterface(object):
+
+    def name(self):
+        return None
+
+    def get_extension_response(self):
+        return None
+
+    def setup_stream_options(self, stream_options):
+        pass
+
+
+class DeflateStreamExtensionProcessor(ExtensionProcessorInterface):
+    """WebSocket DEFLATE stream extension processor.
+
+    Specification:
+    Section 9.2.1 in
+    http://tools.ietf.org/html/draft-ietf-hybi-thewebsocketprotocol-10
+    """
+
+    def __init__(self, request):
+        self._logger = util.get_class_logger(self)
+
+        self._request = request
+
+    def name(self):
+        return common.DEFLATE_STREAM_EXTENSION
+
+    def get_extension_response(self):
+        if len(self._request.get_parameter_names()) != 0:
+            return None
+
+        self._logger.debug(
+            'Enable %s extension', common.DEFLATE_STREAM_EXTENSION)
+
+        return common.ExtensionParameter(common.DEFLATE_STREAM_EXTENSION)
+
+    def setup_stream_options(self, stream_options):
+        stream_options.deflate_stream = True
+
+
+_available_processors[common.DEFLATE_STREAM_EXTENSION] = (
+    DeflateStreamExtensionProcessor)
+
+
+def _log_compression_ratio(logger, original_bytes, total_original_bytes,
+                           filtered_bytes, total_filtered_bytes):
+    # Print inf when ratio is not available.
+    ratio = float('inf')
+    average_ratio = float('inf')
+    if original_bytes != 0:
+        ratio = float(filtered_bytes) / original_bytes
+    if total_original_bytes != 0:
+        average_ratio = (
+            float(total_filtered_bytes) / total_original_bytes)
+    logger.debug('Outgoing compress ratio: %f (average: %f)' %
+                 (ratio, average_ratio))
+
+
+def _log_decompression_ratio(logger, received_bytes, total_received_bytes,
+                             filtered_bytes, total_filtered_bytes):
+    # Print inf when ratio is not available.
+    ratio = float('inf')
+    average_ratio = float('inf')
+    # Guard on filtered_bytes, the divisor; guarding on received_bytes
+    # could divide by zero when inflation yields an empty payload.
+    if filtered_bytes != 0:
+        ratio = float(received_bytes) / filtered_bytes
+    if total_filtered_bytes != 0:
+        average_ratio = (
+            float(total_received_bytes) / total_filtered_bytes)
+    logger.debug('Incoming compress ratio: %f (average: %f)' %
+                 (ratio, average_ratio))
+
+
+class DeflateFrameExtensionProcessor(ExtensionProcessorInterface):
+    """WebSocket Per-frame DEFLATE extension processor.
+
+    Specification:
+    http://tools.ietf.org/html/draft-tyoshino-hybi-websocket-perframe-deflate
+    """
+
+    _WINDOW_BITS_PARAM = 'max_window_bits'
+    _NO_CONTEXT_TAKEOVER_PARAM = 'no_context_takeover'
+
+    def __init__(self, request):
+        self._logger = util.get_class_logger(self)
+
+        self._request = request
+
+        self._response_window_bits = None
+        self._response_no_context_takeover = False
+        self._bfinal = False
+
+        # Counters for statistics.
+
+        # Total number of outgoing bytes supplied to this filter.
+        self._total_outgoing_payload_bytes = 0
+        # Total number of bytes sent to the network after applying this
+        # filter.
+        self._total_filtered_outgoing_payload_bytes = 0
+
+        # Total number of bytes received from the network.
+        self._total_incoming_payload_bytes = 0
+        # Total number of incoming bytes obtained after applying this
+        # filter.
+        self._total_filtered_incoming_payload_bytes = 0
+
+    def name(self):
+        return common.DEFLATE_FRAME_EXTENSION
+
+    def get_extension_response(self):
+        # Any unknown parameter will be just ignored.
+
+        window_bits = self._request.get_parameter_value(
+            self._WINDOW_BITS_PARAM)
+        no_context_takeover = self._request.has_parameter(
+            self._NO_CONTEXT_TAKEOVER_PARAM)
+        if (no_context_takeover and
+            self._request.get_parameter_value(
+                self._NO_CONTEXT_TAKEOVER_PARAM) is not None):
+            return None
+
+        if window_bits is not None:
+            try:
+                window_bits = int(window_bits)
+            except ValueError, e:
+                return None
+            if window_bits < 8 or window_bits > 15:
+                return None
+
+        self._deflater = util._RFC1979Deflater(
+            window_bits, no_context_takeover)
+
+        self._inflater = util._RFC1979Inflater()
+
+        self._compress_outgoing = True
+
+        response = common.ExtensionParameter(self._request.name())
+
+        if self._response_window_bits is not None:
+            response.add_parameter(
+                self._WINDOW_BITS_PARAM, str(self._response_window_bits))
+        if self._response_no_context_takeover:
+            response.add_parameter(
+                self._NO_CONTEXT_TAKEOVER_PARAM, None)
+
+        self._logger.debug(
+            'Enable %s extension ('
+            'request: window_bits=%s; no_context_takeover=%r, '
+            'response: window_bits=%s; no_context_takeover=%r)' %
+            (self._request.name(),
+             window_bits,
+             no_context_takeover,
+             self._response_window_bits,
+             self._response_no_context_takeover))
+
+        return response
+
+    def setup_stream_options(self, stream_options):
+
+        class _OutgoingFilter(object):
+
+            def __init__(self, parent):
+                self._parent = parent
+
+            def filter(self, frame):
+                self._parent._outgoing_filter(frame)
+
+        class _IncomingFilter(object):
+
+            def __init__(self, parent):
+                self._parent = parent
+
+            def filter(self, frame):
+                self._parent._incoming_filter(frame)
+
+        stream_options.outgoing_frame_filters.append(
+            _OutgoingFilter(self))
+        stream_options.incoming_frame_filters.insert(
+            0, _IncomingFilter(self))
+
+    def set_response_window_bits(self, value):
+        self._response_window_bits = value
+
+    def set_response_no_context_takeover(self, value):
+        self._response_no_context_takeover = value
+
+    def set_bfinal(self, value):
+        self._bfinal = value
+
+    def enable_outgoing_compression(self):
+        self._compress_outgoing = True
+
+    def disable_outgoing_compression(self):
+        self._compress_outgoing = False
+
+    def _outgoing_filter(self, frame):
+        """Transform outgoing frames. This method is called only by
+        an _OutgoingFilter instance.
+        """
+
+        original_payload_size = len(frame.payload)
+        self._total_outgoing_payload_bytes += original_payload_size
+
+        if (not self._compress_outgoing or
+            common.is_control_opcode(frame.opcode)):
+            self._total_filtered_outgoing_payload_bytes += (
+                original_payload_size)
+            return
+
+        frame.payload = self._deflater.filter(
+            frame.payload, bfinal=self._bfinal)
+        frame.rsv1 = 1
+
+        filtered_payload_size = len(frame.payload)
+        self._total_filtered_outgoing_payload_bytes += filtered_payload_size
+
+        _log_compression_ratio(self._logger, original_payload_size,
+                               self._total_outgoing_payload_bytes,
+                               filtered_payload_size,
+                               self._total_filtered_outgoing_payload_bytes)
+
+    def _incoming_filter(self, frame):
+        """Transform incoming frames. This method is called only by
+        an _IncomingFilter instance.
+        """
+
+        received_payload_size = len(frame.payload)
+        self._total_incoming_payload_bytes += received_payload_size
+
+        if frame.rsv1 != 1 or common.is_control_opcode(frame.opcode):
+            self._total_filtered_incoming_payload_bytes += (
+                received_payload_size)
+            return
+
+        frame.payload = self._inflater.filter(frame.payload)
+        frame.rsv1 = 0
+
+        filtered_payload_size = len(frame.payload)
+        self._total_filtered_incoming_payload_bytes += filtered_payload_size
+
+        _log_decompression_ratio(self._logger, received_payload_size,
+                                 self._total_incoming_payload_bytes,
+                                 filtered_payload_size,
+                                 self._total_filtered_incoming_payload_bytes)
+
+
+_available_processors[common.DEFLATE_FRAME_EXTENSION] = (
+    DeflateFrameExtensionProcessor)
+
+
+# Adding vendor-prefixed deflate-frame extension.
+# TODO(bashi): Remove this after WebKit stops using vendor prefix.
+_available_processors[common.X_WEBKIT_DEFLATE_FRAME_EXTENSION] = (
+    DeflateFrameExtensionProcessor)
+
+
+def _parse_compression_method(data):
+    """Parses the value of "method" extension parameter."""
+
+    return common.parse_extensions(data, allow_quoted_string=True)
+
+
+def _create_accepted_method_desc(method_name, method_params):
+    """Creates accepted-method-desc from given method name and parameters."""
+
+    extension = common.ExtensionParameter(method_name)
+    for name, value in method_params:
+        extension.add_parameter(name, value)
+    return common.format_extension(extension)
+
+
+class CompressionExtensionProcessorBase(ExtensionProcessorInterface):
+    """Base class for Per-frame and Per-message compression extension."""
+
+    _METHOD_PARAM = 'method'
+
+    def __init__(self, request):
+        self._logger = util.get_class_logger(self)
+        self._request = request
+        self._compression_method_name = None
+        self._compression_processor = None
+        self._compression_processor_hook = None
+
+    def name(self):
+        return ''
+
+    def _lookup_compression_processor(self, method_desc):
+        return None
+
+    def _get_compression_processor_response(self):
+        """Looks up the compression processor based on the self._request and
+        returns the compression processor's response.
+        """
+
+        method_list = self._request.get_parameter_value(self._METHOD_PARAM)
+        if method_list is None:
+            return None
+        methods = _parse_compression_method(method_list)
+        if methods is None:
+            return None
+        compression_processor = None
+        # The current implementation tries only the first method that
+        # matches a supported algorithm. Subsequent methods aren't tried
+        # even if the first one is rejected.
+        # TODO(bashi): Need to clarify this behavior.
+        for method_desc in methods:
+            compression_processor = self._lookup_compression_processor(
+                method_desc)
+            if compression_processor is not None:
+                self._compression_method_name = method_desc.name()
+                break
+        if compression_processor is None:
+            return None
+
+        if self._compression_processor_hook:
+            self._compression_processor_hook(compression_processor)
+
+        processor_response = compression_processor.get_extension_response()
+        if processor_response is None:
+            return None
+        self._compression_processor = compression_processor
+        return processor_response
+
+    def get_extension_response(self):
+        processor_response = self._get_compression_processor_response()
+        if processor_response is None:
+            return None
+
+        response = common.ExtensionParameter(self._request.name())
+        accepted_method_desc = _create_accepted_method_desc(
+            self._compression_method_name,
+            processor_response.get_parameters())
+        response.add_parameter(self._METHOD_PARAM, accepted_method_desc)
+        self._logger.debug(
+            'Enable %s extension (method: %s)' %
+            (self._request.name(), self._compression_method_name))
+        return response
+
+    def setup_stream_options(self, stream_options):
+        if self._compression_processor is None:
+            return
+        self._compression_processor.setup_stream_options(stream_options)
+
+    def set_compression_processor_hook(self, hook):
+        self._compression_processor_hook = hook
+
+    def get_compression_processor(self):
+        return self._compression_processor
+
+
+class PerFrameCompressionExtensionProcessor(CompressionExtensionProcessorBase):
+    """WebSocket Per-frame compression extension processor.
+
+    Specification:
+    http://tools.ietf.org/html/draft-ietf-hybi-websocket-perframe-compression
+    """
+
+    _DEFLATE_METHOD = 'deflate'
+
+    def __init__(self, request):
+        CompressionExtensionProcessorBase.__init__(self, request)
+
+    def name(self):
+        return common.PERFRAME_COMPRESSION_EXTENSION
+
+    def _lookup_compression_processor(self, method_desc):
+        if method_desc.name() == self._DEFLATE_METHOD:
+            return DeflateFrameExtensionProcessor(method_desc)
+        return None
+
+
+_available_processors[common.PERFRAME_COMPRESSION_EXTENSION] = (
+    PerFrameCompressionExtensionProcessor)
+
+
+class DeflateMessageProcessor(ExtensionProcessorInterface):
+    """Per-message deflate processor."""
+
+    _S2C_MAX_WINDOW_BITS_PARAM = 's2c_max_window_bits'
+    _S2C_NO_CONTEXT_TAKEOVER_PARAM = 's2c_no_context_takeover'
+    _C2S_MAX_WINDOW_BITS_PARAM = 'c2s_max_window_bits'
+    _C2S_NO_CONTEXT_TAKEOVER_PARAM = 'c2s_no_context_takeover'
+
+    def __init__(self, request):
+        self._request = request
+        self._logger = util.get_class_logger(self)
+
+        self._c2s_max_window_bits = None
+        self._c2s_no_context_takeover = False
+        self._bfinal = False
+
+        self._compress_outgoing_enabled = False
+
+        # True if a message is fragmented and compression is ongoing.
+        self._compress_ongoing = False
+
+        # Counters for statistics.
+
+        # Total number of outgoing bytes supplied to this filter.
+        self._total_outgoing_payload_bytes = 0
+        # Total number of bytes sent to the network after applying this
+        # filter.
+        self._total_filtered_outgoing_payload_bytes = 0
+
+        # Total number of bytes received from the network.
+        self._total_incoming_payload_bytes = 0
+        # Total number of incoming bytes obtained after applying this
+        # filter.
+        self._total_filtered_incoming_payload_bytes = 0
+
+    def name(self):
+        return 'deflate'
+
+    def get_extension_response(self):
+        # Any unknown parameter will be just ignored.
+
+        s2c_max_window_bits = self._request.get_parameter_value(
+            self._S2C_MAX_WINDOW_BITS_PARAM)
+        if s2c_max_window_bits is not None:
+            try:
+                s2c_max_window_bits = int(s2c_max_window_bits)
+            except ValueError, e:
+                return None
+            if s2c_max_window_bits < 8 or s2c_max_window_bits > 15:
+                return None
+
+        s2c_no_context_takeover = self._request.has_parameter(
+            self._S2C_NO_CONTEXT_TAKEOVER_PARAM)
+        if (s2c_no_context_takeover and
+            self._request.get_parameter_value(
+                self._S2C_NO_CONTEXT_TAKEOVER_PARAM) is not None):
+            return None
+
+        self._deflater = util._RFC1979Deflater(
+            s2c_max_window_bits, s2c_no_context_takeover)
+
+        self._inflater = util._RFC1979Inflater()
+
+        self._compress_outgoing_enabled = True
+
+        response = common.ExtensionParameter(self._request.name())
+
+        if s2c_max_window_bits is not None:
+            response.add_parameter(
+                self._S2C_MAX_WINDOW_BITS_PARAM, str(s2c_max_window_bits))
+
+        if s2c_no_context_takeover:
+            response.add_parameter(
+                self._S2C_NO_CONTEXT_TAKEOVER_PARAM, None)
+
+        if self._c2s_max_window_bits is not None:
+            response.add_parameter(
+                self._C2S_MAX_WINDOW_BITS_PARAM,
+                str(self._c2s_max_window_bits))
+        if self._c2s_no_context_takeover:
+            response.add_parameter(
+                self._C2S_NO_CONTEXT_TAKEOVER_PARAM, None)
+
+        self._logger.debug(
+            'Enable %s extension ('
+            'request: s2c_max_window_bits=%s; s2c_no_context_takeover=%r, '
+            'response: c2s_max_window_bits=%s; c2s_no_context_takeover=%r)' %
+            (self._request.name(),
+             s2c_max_window_bits,
+             s2c_no_context_takeover,
+             self._c2s_max_window_bits,
+             self._c2s_no_context_takeover))
+
+        return response
+
+    def setup_stream_options(self, stream_options):
+        class _OutgoingMessageFilter(object):
+
+            def __init__(self, parent):
+                self._parent = parent
+
+            def filter(self, message, end=True, binary=False):
+                return self._parent._process_outgoing_message(
+                    message, end, binary)
+
+        class _IncomingMessageFilter(object):
+
+            def __init__(self, parent):
+                self._parent = parent
+                self._decompress_next_message = False
+
+            def decompress_next_message(self):
+                self._decompress_next_message = True
+
+            def filter(self, message):
+                message = self._parent._process_incoming_message(
+                    message, self._decompress_next_message)
+                self._decompress_next_message = False
+                return message
+
+        self._outgoing_message_filter = _OutgoingMessageFilter(self)
+        self._incoming_message_filter = _IncomingMessageFilter(self)
+        stream_options.outgoing_message_filters.append(
+            self._outgoing_message_filter)
+        stream_options.incoming_message_filters.append(
+            self._incoming_message_filter)
+
+        class _OutgoingFrameFilter(object):
+
+            def __init__(self, parent):
+                self._parent = parent
+                self._set_compression_bit = False
+
+            def set_compression_bit(self):
+                self._set_compression_bit = True
+
+            def filter(self, frame):
+                self._parent._process_outgoing_frame(
+                    frame, self._set_compression_bit)
+                self._set_compression_bit = False
+
+        class _IncomingFrameFilter(object):
+
+            def __init__(self, parent):
+                self._parent = parent
+
+            def filter(self, frame):
+                self._parent._process_incoming_frame(frame)
+
+        self._outgoing_frame_filter = _OutgoingFrameFilter(self)
+        self._incoming_frame_filter = _IncomingFrameFilter(self)
+        stream_options.outgoing_frame_filters.append(
+            self._outgoing_frame_filter)
+        stream_options.incoming_frame_filters.append(
+            self._incoming_frame_filter)
+
+        stream_options.encode_text_message_to_utf8 = False
+
+    def set_c2s_max_window_bits(self, value):
+        self._c2s_max_window_bits = value
+
+    def set_c2s_no_context_takeover(self, value):
+        self._c2s_no_context_takeover = value
+
+    def set_bfinal(self, value):
+        self._bfinal = value
+
+    def enable_outgoing_compression(self):
+        self._compress_outgoing_enabled = True
+
+    def disable_outgoing_compression(self):
+        self._compress_outgoing_enabled = False
+
+    def _process_incoming_message(self, message, decompress):
+        if not decompress:
+            return message
+
+        received_payload_size = len(message)
+        self._total_incoming_payload_bytes += received_payload_size
+
+        message = self._inflater.filter(message)
+
+        filtered_payload_size = len(message)
+        self._total_filtered_incoming_payload_bytes += filtered_payload_size
+
+        _log_decompression_ratio(self._logger, received_payload_size,
+                                 self._total_incoming_payload_bytes,
+                                 filtered_payload_size,
+                                 self._total_filtered_incoming_payload_bytes)
+
+        return message
+
+    def _process_outgoing_message(self, message, end, binary):
+        if not binary:
+            message = message.encode('utf-8')
+
+        if not self._compress_outgoing_enabled:
+            return message
+
+        original_payload_size = len(message)
+        self._total_outgoing_payload_bytes += original_payload_size
+
+        message = self._deflater.filter(
+            message, flush=end, bfinal=self._bfinal)
+
+        filtered_payload_size = len(message)
+        self._total_filtered_outgoing_payload_bytes += filtered_payload_size
+
+        _log_compression_ratio(self._logger, original_payload_size,
+                               self._total_outgoing_payload_bytes,
+                               filtered_payload_size,
+                               self._total_filtered_outgoing_payload_bytes)
+
+        if not self._compress_ongoing:
+            self._outgoing_frame_filter.set_compression_bit()
+        self._compress_ongoing = not end
+        return message
+
+    def _process_incoming_frame(self, frame):
+        if frame.rsv1 == 1 and not common.is_control_opcode(frame.opcode):
+            self._incoming_message_filter.decompress_next_message()
+            frame.rsv1 = 0
+
+    def _process_outgoing_frame(self, frame, compression_bit):
+        if (not compression_bit or
+            common.is_control_opcode(frame.opcode)):
+            return
+
+        frame.rsv1 = 1
+
+
+class PerMessageCompressionExtensionProcessor(
+    CompressionExtensionProcessorBase):
+    """WebSocket Per-message compression extension processor.
+
+    Specification:
+    http://tools.ietf.org/html/draft-ietf-hybi-permessage-compression
+    """
+
+    _DEFLATE_METHOD = 'deflate'
+
+    def __init__(self, request):
+        CompressionExtensionProcessorBase.__init__(self, request)
+
+    def name(self):
+        return common.PERMESSAGE_COMPRESSION_EXTENSION
+
+    def _lookup_compression_processor(self, method_desc):
+        if method_desc.name() == self._DEFLATE_METHOD:
+            return DeflateMessageProcessor(method_desc)
+        return None
+
+
+_available_processors[common.PERMESSAGE_COMPRESSION_EXTENSION] = (
+    PerMessageCompressionExtensionProcessor)
+
+
+# Adding vendor-prefixed permessage-compress extension.
+# TODO(bashi): Remove this after WebKit stops using vendor prefix.
+_available_processors[common.X_WEBKIT_PERMESSAGE_COMPRESSION_EXTENSION] = (
+    PerMessageCompressionExtensionProcessor)
+
+
+class MuxExtensionProcessor(ExtensionProcessorInterface):
+    """WebSocket multiplexing extension processor."""
+
+    _QUOTA_PARAM = 'quota'
+
+    def __init__(self, request):
+        self._request = request
+
+    def name(self):
+        return common.MUX_EXTENSION
+
+    def get_extension_response(self, ws_request,
+                               logical_channel_extensions):
+        # Mux extension cannot be used after extensions that depend on
+        # frame boundary, extension data field, or any reserved bits
+        # which are attributed to each frame.
+        for extension in logical_channel_extensions:
+            name = extension.name()
+            if (name == common.PERFRAME_COMPRESSION_EXTENSION or
+                name == common.DEFLATE_FRAME_EXTENSION or
+                name == common.X_WEBKIT_DEFLATE_FRAME_EXTENSION):
+                return None
+
+        quota = self._request.get_parameter_value(self._QUOTA_PARAM)
+        if quota is None:
+            ws_request.mux_quota = 0
+        else:
+            try:
+                quota = int(quota)
+            except ValueError, e:
+                return None
+            if quota < 0 or quota >= 2 ** 32:
+                return None
+            ws_request.mux_quota = quota
+
+        ws_request.mux = True
+        ws_request.mux_extensions = logical_channel_extensions
+        return common.ExtensionParameter(common.MUX_EXTENSION)
+
+    def setup_stream_options(self, stream_options):
+        pass
+
+
+_available_processors[common.MUX_EXTENSION] = MuxExtensionProcessor
+
+
+def get_extension_processor(extension_request):
+    global _available_processors
+    processor_class = _available_processors.get(extension_request.name())
+    if processor_class is None:
+        return None
+    return processor_class(extension_request)
+
+
+# vi:sts=4 sw=4 et
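Both deflate processors above delegate the byte-level work to
util._RFC1979Deflater and util._RFC1979Inflater, whose core trick is raw
DEFLATE with the 4-octet tail of each sync flush stripped from the wire. A
minimal sketch of that framing using only the standard zlib module
(deflate_chunk and inflate_chunk are made-up helper names, not the util
API itself):

    import zlib

    def deflate_chunk(compressor, payload):
        # Z_SYNC_FLUSH always ends output with the empty stored block
        # 00 00 ff ff; the WebSocket compression drafts drop that tail.
        data = compressor.compress(payload)
        data += compressor.flush(zlib.Z_SYNC_FLUSH)
        assert data.endswith('\x00\x00\xff\xff')
        return data[:-4]

    def inflate_chunk(decompressor, data):
        # The receiver re-appends the tail before inflating.
        return decompressor.decompress(data + '\x00\x00\xff\xff')

    # Negative wbits selects raw DEFLATE (no zlib header/trailer).
    compressor = zlib.compressobj(
        zlib.Z_DEFAULT_COMPRESSION, zlib.DEFLATED, -zlib.MAX_WBITS)
    decompressor = zlib.decompressobj(-zlib.MAX_WBITS)
    wire = deflate_chunk(compressor, 'Hello, WebSocket!')
    assert inflate_chunk(decompressor, wire) == 'Hello, WebSocket!'

Keeping one compressobj across frames or messages is what gives context
takeover; the no_context_takeover parameters above amount to resetting the
compressor for each frame or message.
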
diff --git a/module/lib/mod_pywebsocket/handshake/__init__.py b/module/lib/mod_pywebsocket/handshake/__init__.py
new file mode 100644
index 000000000..194f6b395
--- /dev/null
+++ b/module/lib/mod_pywebsocket/handshake/__init__.py
@@ -0,0 +1,110 @@
+# Copyright 2011, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+"""WebSocket opening handshake processor. This module tries to apply
+available opening handshake processors for each protocol version until a
+connection is successfully established.
+"""
+
+
+import logging
+
+from mod_pywebsocket import common
+from mod_pywebsocket.handshake import hybi00
+from mod_pywebsocket.handshake import hybi
+# Export AbortedByUserException, HandshakeException, and VersionException
+# symbols from this module.
+from mod_pywebsocket.handshake._base import AbortedByUserException
+from mod_pywebsocket.handshake._base import HandshakeException
+from mod_pywebsocket.handshake._base import VersionException
+
+
+_LOGGER = logging.getLogger(__name__)
+
+
+def do_handshake(request, dispatcher, allowDraft75=False, strict=False):
+    """Performs WebSocket handshake.
+
+    Args:
+        request: mod_python request.
+        dispatcher: Dispatcher (dispatch.Dispatcher).
+        allowDraft75: obsolete argument. Ignored.
+        strict: obsolete argument. Ignored.
+
+    Handshaker will add attributes such as ws_resource in performing
+    handshake.
+    """
+
+    _LOGGER.debug('Client\'s opening handshake resource: %r', request.uri)
+    # To print mimetools.Message as an escaped one-line string, we convert
+    # headers_in to a dict object. Without conversion, if we use %r, it just
+    # prints the type and address, and if we use %s, it prints the original
+    # header string as multiple lines.
+    #
+    # Both mimetools.Message and MpTable_Type of mod_python can be
+    # converted to dict.
+    #
+    # mimetools.Message.__str__ returns the original header string.
+    # dict(mimetools.Message object) returns the map from header names to
+    # header values. MpTable_Type doesn't have such a __str__, but its
+    # __repr__ formats it like a dictionary object does.
+    _LOGGER.debug(
+        'Client\'s opening handshake headers: %r', dict(request.headers_in))
+
+    handshakers = []
+    handshakers.append(
+        ('RFC 6455', hybi.Handshaker(request, dispatcher)))
+    handshakers.append(
+        ('HyBi 00', hybi00.Handshaker(request, dispatcher)))
+
+    for name, handshaker in handshakers:
+        _LOGGER.debug('Trying protocol version %s', name)
+        try:
+            handshaker.do_handshake()
+            _LOGGER.info('Established (%s protocol)', name)
+            return
+        except HandshakeException, e:
+            _LOGGER.debug(
+                'Failed to complete opening handshake as %s protocol: %r',
+                name, e)
+            if e.status:
+                raise e
+        except AbortedByUserException, e:
+            raise
+        except VersionException, e:
+            raise
+
+    # TODO(toyoshim): Add a test to cover the case all handshakers fail.
+    raise HandshakeException(
+        'Failed to complete opening handshake for all available protocols',
+        status=common.HTTP_STATUS_BAD_REQUEST)
+
+
+# vi:sts=4 sw=4 et
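How a server loop drives this entry point, as a sketch (the request object
is whatever mod_python hands the connection handler, already carrying
headers_in, uri and connection; the handler directory path is made up):

    from mod_pywebsocket import common, dispatch, handshake

    def handle_websocket(request):
        dispatcher = dispatch.Dispatcher('/var/www/wsh')
        try:
            handshake.do_handshake(request, dispatcher)
        except handshake.HandshakeException, e:
            # Opening handshake failed: answer with a plain HTTP error,
            # using e.status when a handshaker set one.
            return e.status or common.HTTP_STATUS_BAD_REQUEST
        except handshake.VersionException, e:
            # e.supported_versions can be echoed back to the client in a
            # Sec-WebSocket-Version response header.
            return common.HTTP_STATUS_BAD_REQUEST
        dispatcher.transfer_data(request)
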
diff --git a/module/lib/mod_pywebsocket/handshake/_base.py b/module/lib/mod_pywebsocket/handshake/_base.py
new file mode 100644
index 000000000..e5c94ca90
--- /dev/null
+++ b/module/lib/mod_pywebsocket/handshake/_base.py
@@ -0,0 +1,226 @@
+# Copyright 2012, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+"""Common functions and exceptions used by WebSocket opening handshake
+processors.
+"""
+
+
+from mod_pywebsocket import common
+from mod_pywebsocket import http_header_util
+
+
+class AbortedByUserException(Exception):
+    """Exception for aborting a connection intentionally.
+
+    If this exception is raised in do_extra_handshake handler, the connection
+    will be abandoned. No other WebSocket or HTTP(S) handler will be invoked.
+
+    If this exception is raised in transfer_data_handler, the connection will
+    be closed without closing handshake. No other WebSocket or HTTP(S) handler
+    will be invoked.
+    """
+
+    pass
+
+
+class HandshakeException(Exception):
+    """This exception will be raised when an error occurs while processing
+    the WebSocket opening handshake.
+    """
+
+    def __init__(self, name, status=None):
+        super(HandshakeException, self).__init__(name)
+        self.status = status
+
+
+class VersionException(Exception):
+    """This exception will be raised when the version of the client request
+    does not match a version the server supports.
+    """
+
+    def __init__(self, name, supported_versions=''):
+        """Construct an instance.
+
+        Args:
+            supported_versions: a str object showing supported hybi versions.
+                (e.g. '8, 13')
+        """
+        super(VersionException, self).__init__(name)
+        self.supported_versions = supported_versions
+
+
+def get_default_port(is_secure):
+    if is_secure:
+        return common.DEFAULT_WEB_SOCKET_SECURE_PORT
+    else:
+        return common.DEFAULT_WEB_SOCKET_PORT
+
+
+def validate_subprotocol(subprotocol, hixie):
+    """Validate a value in the Sec-WebSocket-Protocol field.
+
+    See
+    - RFC 6455: Section 4.1., 4.2.2., and 4.3.
+    - HyBi 00: Section 4.1. Opening handshake
+
+    Args:
+        hixie: if True, checks if characters in subprotocol are in range
+            between U+0020 and U+007E. It's required by HyBi 00 but not by
+            RFC 6455.
+    """
+
+    if not subprotocol:
+        raise HandshakeException('Invalid subprotocol name: empty')
+    if hixie:
+        # Parameter should be in the range U+0020 to U+007E.
+        for c in subprotocol:
+            if not 0x20 <= ord(c) <= 0x7e:
+                raise HandshakeException(
+                    'Illegal character in subprotocol name: %r' % c)
+    else:
+        # Parameter should be encoded HTTP token.
+        state = http_header_util.ParsingState(subprotocol)
+        token = http_header_util.consume_token(state)
+        rest = http_header_util.peek(state)
+        # If |rest| is not None, |subprotocol| is not a single token or is
+        # invalid. If |rest| is None, |token| must not be None because
+        # |subprotocol| is the concatenation of |token| and |rest| and is
+        # not None.
+        if rest is not None:
+            raise HandshakeException('Invalid non-token string in subprotocol '
+                                     'name: %r' % rest)
+
+
+def parse_host_header(request):
+    fields = request.headers_in['Host'].split(':', 1)
+    if len(fields) == 1:
+        return fields[0], get_default_port(request.is_https())
+    try:
+        return fields[0], int(fields[1])
+    except ValueError, e:
+        raise HandshakeException('Invalid port number format: %r' % e)
+
+
+def format_header(name, value):
+    return '%s: %s\r\n' % (name, value)
+
+
+def build_location(request):
+    """Build WebSocket location for request."""
+    location_parts = []
+    if request.is_https():
+        location_parts.append(common.WEB_SOCKET_SECURE_SCHEME)
+    else:
+        location_parts.append(common.WEB_SOCKET_SCHEME)
+    location_parts.append('://')
+    host, port = parse_host_header(request)
+    connection_port = request.connection.local_addr[1]
+    if port != connection_port:
+        raise HandshakeException('Header/connection port mismatch: %d/%d' %
+                                 (port, connection_port))
+    location_parts.append(host)
+    if port != get_default_port(request.is_https()):
+        location_parts.append(':')
+        location_parts.append(str(port))
+    location_parts.append(request.uri)
+    return ''.join(location_parts)
+
+
+def get_mandatory_header(request, key):
+    value = request.headers_in.get(key)
+    if value is None:
+        raise HandshakeException('Header %s is not defined' % key)
+    return value
+
+
+def validate_mandatory_header(request, key, expected_value, fail_status=None):
+    value = get_mandatory_header(request, key)
+
+    if value.lower() != expected_value.lower():
+        raise HandshakeException(
+            'Expected %r for header %s but found %r (case-insensitive)' %
+            (expected_value, key, value), status=fail_status)
+
+
+def check_request_line(request):
+    # 5.1 1. The three character UTF-8 string "GET".
+    # 5.1 2. A UTF-8-encoded U+0020 SPACE character (0x20 byte).
+    if request.method != 'GET':
+        raise HandshakeException('Method is not GET: %r' % request.method)
+
+    if request.protocol != 'HTTP/1.1':
+        raise HandshakeException('Version is not HTTP/1.1: %r' %
+                                 request.protocol)
+
+
+def check_header_lines(request, mandatory_headers):
+    check_request_line(request)
+
+    # The expected field names, and the meaning of their corresponding
+    # values, are as follows.
+    # |Upgrade| and |Connection|
+    for key, expected_value in mandatory_headers:
+        validate_mandatory_header(request, key, expected_value)
+
+
+def parse_token_list(data):
+    """Parses a header value which follows 1#token and returns parsed elements
+    as a list of strings.
+
+    Leading LWSes must be trimmed.
+    """
+
+    state = http_header_util.ParsingState(data)
+
+    token_list = []
+
+    while True:
+        token = http_header_util.consume_token(state)
+        if token is not None:
+            token_list.append(token)
+
+        http_header_util.consume_lwses(state)
+
+        if http_header_util.peek(state) is None:
+            break
+
+        if not http_header_util.consume_string(state, ','):
+            raise HandshakeException(
+                'Expected a comma but found %r' % http_header_util.peek(state))
+
+        http_header_util.consume_lwses(state)
+
+    if len(token_list) == 0:
+        raise HandshakeException('No valid token found')
+
+    return token_list
+
+
+# vi:sts=4 sw=4 et
diff --git a/module/lib/mod_pywebsocket/handshake/hybi.py b/module/lib/mod_pywebsocket/handshake/hybi.py
new file mode 100644
index 000000000..fc0e2a096
--- /dev/null
+++ b/module/lib/mod_pywebsocket/handshake/hybi.py
@@ -0,0 +1,404 @@
+# Copyright 2012, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+"""This file provides the opening handshake processor for the WebSocket
+protocol (RFC 6455).
+
+Specification:
+http://tools.ietf.org/html/rfc6455
+"""
+
+
+# Note: request.connection.write is used in this module, even though
+# mod_python documentation says that it should be used only in connection
+# handlers. Unfortunately, we have no other options. For example,
+# request.write is not suitable because it doesn't allow direct raw bytes
+# writing.
+
+
+import base64
+import logging
+import os
+import re
+
+from mod_pywebsocket import common
+from mod_pywebsocket.extensions import get_extension_processor
+from mod_pywebsocket.handshake._base import check_request_line
+from mod_pywebsocket.handshake._base import format_header
+from mod_pywebsocket.handshake._base import get_mandatory_header
+from mod_pywebsocket.handshake._base import HandshakeException
+from mod_pywebsocket.handshake._base import parse_token_list
+from mod_pywebsocket.handshake._base import validate_mandatory_header
+from mod_pywebsocket.handshake._base import validate_subprotocol
+from mod_pywebsocket.handshake._base import VersionException
+from mod_pywebsocket.stream import Stream
+from mod_pywebsocket.stream import StreamOptions
+from mod_pywebsocket import util
+
+
+# Used to validate the value in the Sec-WebSocket-Key header strictly. RFC
+# 4648 disallows non-zero padding, so the character right before == must be
+# any of A, Q, g and w.
+_SEC_WEBSOCKET_KEY_REGEX = re.compile('^[+/0-9A-Za-z]{21}[AQgw]==$')
+
+# Defining aliases for values used frequently.
+_VERSION_HYBI08 = common.VERSION_HYBI08
+_VERSION_HYBI08_STRING = str(_VERSION_HYBI08)
+_VERSION_LATEST = common.VERSION_HYBI_LATEST
+_VERSION_LATEST_STRING = str(_VERSION_LATEST)
+_SUPPORTED_VERSIONS = [
+    _VERSION_LATEST,
+    _VERSION_HYBI08,
+]
+
+
+def compute_accept(key):
+    """Computes value for the Sec-WebSocket-Accept header from value of the
+    Sec-WebSocket-Key header.
+    """
+
+    accept_binary = util.sha1_hash(
+        key + common.WEBSOCKET_ACCEPT_UUID).digest()
+    accept = base64.b64encode(accept_binary)
+
+    return (accept, accept_binary)
+
+
+class Handshaker(object):
+    """Opening handshake processor for the WebSocket protocol (RFC 6455)."""
+
+    def __init__(self, request, dispatcher):
+        """Construct an instance.
+
+        Args:
+            request: mod_python request.
+            dispatcher: Dispatcher (dispatch.Dispatcher).
+
+        Handshaker will add attributes such as ws_resource during handshake.
+        """
+
+        self._logger = util.get_class_logger(self)
+
+        self._request = request
+        self._dispatcher = dispatcher
+
+    def _validate_connection_header(self):
+        connection = get_mandatory_header(
+            self._request, common.CONNECTION_HEADER)
+
+        try:
+            connection_tokens = parse_token_list(connection)
+        except HandshakeException, e:
+            raise HandshakeException(
+                'Failed to parse %s: %s' % (common.CONNECTION_HEADER, e))
+
+        connection_is_valid = False
+        for token in connection_tokens:
+            if token.lower() == common.UPGRADE_CONNECTION_TYPE.lower():
+                connection_is_valid = True
+                break
+        if not connection_is_valid:
+            raise HandshakeException(
+                '%s header doesn\'t contain "%s"' %
+                (common.CONNECTION_HEADER, common.UPGRADE_CONNECTION_TYPE))
+
+    def do_handshake(self):
+        self._request.ws_close_code = None
+        self._request.ws_close_reason = None
+
+        # Parsing.
+
+        check_request_line(self._request)
+
+        validate_mandatory_header(
+            self._request,
+            common.UPGRADE_HEADER,
+            common.WEBSOCKET_UPGRADE_TYPE)
+
+        self._validate_connection_header()
+
+        self._request.ws_resource = self._request.uri
+
+        unused_host = get_mandatory_header(self._request, common.HOST_HEADER)
+
+        self._request.ws_version = self._check_version()
+
+        # This handshake must be based on the latest hybi. We are responsible
+        # for falling back to HTTP on handshake failure, as the latest hybi
+        # handshake specifies.
+        try:
+            self._get_origin()
+            self._set_protocol()
+            self._parse_extensions()
+
+            # Key validation, response generation.
+
+            key = self._get_key()
+            (accept, accept_binary) = compute_accept(key)
+            self._logger.debug(
+                '%s: %r (%s)',
+                common.SEC_WEBSOCKET_ACCEPT_HEADER,
+                accept,
+                util.hexify(accept_binary))
+
+            self._logger.debug('Protocol version is RFC 6455')
+
+            # Setup extension processors.
+
+            processors = []
+            if self._request.ws_requested_extensions is not None:
+                for extension_request in self._request.ws_requested_extensions:
+                    processor = get_extension_processor(extension_request)
+                    # Unknown extension requests are just ignored.
+                    if processor is not None:
+                        processors.append(processor)
+            self._request.ws_extension_processors = processors
+
+            # Extra handshake handler may modify/remove processors.
+            self._dispatcher.do_extra_handshake(self._request)
+            processors = filter(lambda processor: processor is not None,
+                                self._request.ws_extension_processors)
+
+            accepted_extensions = []
+
+            # We need to take care of mux extension here. Extensions that
+            # are placed before mux should be applied to logical channels.
+            mux_index = -1
+            for i, processor in enumerate(processors):
+                if processor.name() == common.MUX_EXTENSION:
+                    mux_index = i
+                    break
+            if mux_index >= 0:
+                mux_processor = processors[mux_index]
+                logical_channel_processors = processors[:mux_index]
+                processors = processors[mux_index+1:]
+
+                for processor in logical_channel_processors:
+                    extension_response = processor.get_extension_response()
+                    if extension_response is None:
+                        # Rejected.
+                        continue
+                    accepted_extensions.append(extension_response)
+                # Pass a shallow copy of accepted_extensions as extensions for
+                # logical channels.
+                mux_response = mux_processor.get_extension_response(
+                    self._request, accepted_extensions[:])
+                if mux_response is not None:
+                    accepted_extensions.append(mux_response)
+
+            stream_options = StreamOptions()
+
+            # When there is a mux extension, |processors| here contains only
+            # processors for extensions placed after mux.
+            for processor in processors:
+
+                extension_response = processor.get_extension_response()
+                if extension_response is None:
+                    # Rejected.
+                    continue
+
+                accepted_extensions.append(extension_response)
+
+                processor.setup_stream_options(stream_options)
+
+            if len(accepted_extensions) > 0:
+                self._request.ws_extensions = accepted_extensions
+                self._logger.debug(
+                    'Extensions accepted: %r',
+                    map(common.ExtensionParameter.name, accepted_extensions))
+            else:
+                self._request.ws_extensions = None
+
+            self._request.ws_stream = self._create_stream(stream_options)
+
+            if self._request.ws_requested_protocols is not None:
+                if self._request.ws_protocol is None:
+                    raise HandshakeException(
+                        'do_extra_handshake must choose one subprotocol from '
+                        'ws_requested_protocols and set it to ws_protocol')
+                validate_subprotocol(self._request.ws_protocol, hixie=False)
+
+                self._logger.debug(
+                    'Subprotocol accepted: %r',
+                    self._request.ws_protocol)
+            else:
+                if self._request.ws_protocol is not None:
+                    raise HandshakeException(
+                        'ws_protocol must be None when the client didn\'t '
+                        'request any subprotocol')
+
+            self._send_handshake(accept)
+        except HandshakeException, e:
+            if not e.status:
+                # Fallback to 400 bad request by default.
+                e.status = common.HTTP_STATUS_BAD_REQUEST
+            raise e
+
+    def _get_origin(self):
+        if self._request.ws_version is _VERSION_HYBI08:
+            origin_header = common.SEC_WEBSOCKET_ORIGIN_HEADER
+        else:
+            origin_header = common.ORIGIN_HEADER
+        origin = self._request.headers_in.get(origin_header)
+        if origin is None:
+            self._logger.debug('Client request does not have origin header')
+        self._request.ws_origin = origin
+
+    def _check_version(self):
+        version = get_mandatory_header(self._request,
+                                       common.SEC_WEBSOCKET_VERSION_HEADER)
+        if version == _VERSION_HYBI08_STRING:
+            return _VERSION_HYBI08
+        if version == _VERSION_LATEST_STRING:
+            return _VERSION_LATEST
+
+        if version.find(',') >= 0:
+            raise HandshakeException(
+                'Multiple versions (%r) are not allowed for header %s' %
+                (version, common.SEC_WEBSOCKET_VERSION_HEADER),
+                status=common.HTTP_STATUS_BAD_REQUEST)
+        raise VersionException(
+            'Unsupported version %r for header %s' %
+            (version, common.SEC_WEBSOCKET_VERSION_HEADER),
+            supported_versions=', '.join(map(str, _SUPPORTED_VERSIONS)))
+
+    def _set_protocol(self):
+        self._request.ws_protocol = None
+
+        protocol_header = self._request.headers_in.get(
+            common.SEC_WEBSOCKET_PROTOCOL_HEADER)
+
+        if protocol_header is None:
+            self._request.ws_requested_protocols = None
+            return
+
+        self._request.ws_requested_protocols = parse_token_list(
+            protocol_header)
+        self._logger.debug('Subprotocols requested: %r',
+                           self._request.ws_requested_protocols)
+
+    def _parse_extensions(self):
+        extensions_header = self._request.headers_in.get(
+            common.SEC_WEBSOCKET_EXTENSIONS_HEADER)
+        if not extensions_header:
+            self._request.ws_requested_extensions = None
+            return
+
+        if self._request.ws_version is common.VERSION_HYBI08:
+            allow_quoted_string = False
+        else:
+            allow_quoted_string = True
+        try:
+            self._request.ws_requested_extensions = common.parse_extensions(
+                extensions_header, allow_quoted_string=allow_quoted_string)
+        except common.ExtensionParsingException, e:
+            raise HandshakeException(
+                'Failed to parse Sec-WebSocket-Extensions header: %r' % e)
+
+        self._logger.debug(
+            'Extensions requested: %r',
+            map(common.ExtensionParameter.name,
+                self._request.ws_requested_extensions))
+
+    def _validate_key(self, key):
+        if key.find(',') >= 0:
+            raise HandshakeException('Request has multiple %s header lines or '
+                                     'contains illegal character \',\': %r' %
+                                     (common.SEC_WEBSOCKET_KEY_HEADER, key))
+
+        # Validate
+        key_is_valid = False
+        try:
+            # Validate key by quick regex match before parsing by base64
+            # module. Because base64 module skips invalid characters, we have
+            # to do this in advance to make this server strictly reject illegal
+            # keys.
+            if _SEC_WEBSOCKET_KEY_REGEX.match(key):
+                decoded_key = base64.b64decode(key)
+                if len(decoded_key) == 16:
+                    key_is_valid = True
+        except TypeError, e:
+            pass
+
+        if not key_is_valid:
+            raise HandshakeException(
+                'Illegal value for header %s: %r' %
                (common.SEC_WEBSOCKET_KEY_HEADER, key))
+
+        return decoded_key
+
+    def _get_key(self):
+        key = get_mandatory_header(
+            self._request, common.SEC_WEBSOCKET_KEY_HEADER)
+
+        decoded_key = self._validate_key(key)
+
+        self._logger.debug(
+            '%s: %r (%s)',
+            common.SEC_WEBSOCKET_KEY_HEADER,
+            key,
+            util.hexify(decoded_key))
+
+        return key
+
+    def _create_stream(self, stream_options):
+        return Stream(self._request, stream_options)
+
+    def _create_handshake_response(self, accept):
+        response = []
+
+        response.append('HTTP/1.1 101 Switching Protocols\r\n')
+
+        response.append(format_header(
+            common.UPGRADE_HEADER, common.WEBSOCKET_UPGRADE_TYPE))
+        response.append(format_header(
+            common.CONNECTION_HEADER, common.UPGRADE_CONNECTION_TYPE))
+        response.append(format_header(
+            common.SEC_WEBSOCKET_ACCEPT_HEADER, accept))
+        if self._request.ws_protocol is not None:
+            response.append(format_header(
+                common.SEC_WEBSOCKET_PROTOCOL_HEADER,
+                self._request.ws_protocol))
+        if (self._request.ws_extensions is not None and
+            len(self._request.ws_extensions) != 0):
+            response.append(format_header(
+                common.SEC_WEBSOCKET_EXTENSIONS_HEADER,
+                common.format_extensions(self._request.ws_extensions)))
+        response.append('\r\n')
+
+        return ''.join(response)
+
+    def _send_handshake(self, accept):
+        raw_response = self._create_handshake_response(accept)
+        self._request.connection.write(raw_response)
+        self._logger.debug('Sent server\'s opening handshake: %r',
+                           raw_response)
+
+
+# vi:sts=4 sw=4 et
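compute_accept() above can be sanity-checked against the worked example in
RFC 6455 section 1.3 (a sketch, assuming the package is importable; the key
below is the RFC's published sample value, not a secret):

    from mod_pywebsocket.handshake.hybi import compute_accept

    accept, accept_binary = compute_accept('dGhlIHNhbXBsZSBub25jZQ==')
    assert accept == 's3pPLMBiTxaQ9kYGzzhZRbK+xOo='

The base64 form goes into the Sec-WebSocket-Accept header; the binary form
is what do_handshake() logs via util.hexify().
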
nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + +"""This file provides the opening handshake processor for the WebSocket +protocol version HyBi 00. + +Specification: +http://tools.ietf.org/html/draft-ietf-hybi-thewebsocketprotocol-00 +""" + + +# Note: request.connection.write/read are used in this module, even though +# mod_python document says that they should be used only in connection +# handlers. Unfortunately, we have no other options. For example, +# request.write/read are not suitable because they don't allow direct raw bytes +# writing/reading. + + +import logging +import re +import struct + +from mod_pywebsocket import common +from mod_pywebsocket.stream import StreamHixie75 +from mod_pywebsocket import util +from mod_pywebsocket.handshake._base import HandshakeException +from mod_pywebsocket.handshake._base import build_location +from mod_pywebsocket.handshake._base import check_header_lines +from mod_pywebsocket.handshake._base import format_header +from mod_pywebsocket.handshake._base import get_mandatory_header +from mod_pywebsocket.handshake._base import validate_subprotocol + + +_MANDATORY_HEADERS = [ + # key, expected value or None + [common.UPGRADE_HEADER, common.WEBSOCKET_UPGRADE_TYPE_HIXIE75], + [common.CONNECTION_HEADER, common.UPGRADE_CONNECTION_TYPE], +] + + +class Handshaker(object): + """Opening handshake processor for the WebSocket protocol version HyBi 00. + """ + + def __init__(self, request, dispatcher): + """Construct an instance. + + Args: + request: mod_python request. + dispatcher: Dispatcher (dispatch.Dispatcher). + + Handshaker will add attributes such as ws_resource in performing + handshake. + """ + + self._logger = util.get_class_logger(self) + + self._request = request + self._dispatcher = dispatcher + + def do_handshake(self): + """Perform WebSocket Handshake. + + On _request, we set + ws_resource, ws_protocol, ws_location, ws_origin, ws_challenge, + ws_challenge_md5: WebSocket handshake information. + ws_stream: Frame generation/parsing class. + ws_version: Protocol version. + + Raises: + HandshakeException: when any error happened in parsing the opening + handshake request. + """ + + # 5.1 Reading the client's opening handshake. + # dispatcher sets it in self._request. 
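For reference, the HyBi 00 opening handshake parsed by the steps below looks roughly like the worked example in the draft (illustrative values taken from draft-76/hybi-00, not from this patch):

    GET /demo HTTP/1.1
    Host: example.com
    Connection: Upgrade
    Sec-WebSocket-Key2: 12998 5 Y3 1  .P00
    Upgrade: WebSocket
    Sec-WebSocket-Key1: 4 @1  46546xW%0l 1 5
    Origin: http://example.com

followed by eight bytes of binary key data; the 16-byte MD5 challenge response is computed in _set_challenge_response() below.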
+ check_header_lines(self._request, _MANDATORY_HEADERS)
+ self._set_resource()
+ self._set_subprotocol()
+ self._set_location()
+ self._set_origin()
+ self._set_challenge_response()
+ self._set_protocol_version()
+
+ self._dispatcher.do_extra_handshake(self._request)
+
+ self._send_handshake()
+
+ def _set_resource(self):
+ self._request.ws_resource = self._request.uri
+
+ def _set_subprotocol(self):
+ # |Sec-WebSocket-Protocol|
+ subprotocol = self._request.headers_in.get(
+ common.SEC_WEBSOCKET_PROTOCOL_HEADER)
+ if subprotocol is not None:
+ validate_subprotocol(subprotocol, hixie=True)
+ self._request.ws_protocol = subprotocol
+
+ def _set_location(self):
+ # |Host|
+ host = self._request.headers_in.get(common.HOST_HEADER)
+ if host is not None:
+ self._request.ws_location = build_location(self._request)
+ # TODO(ukai): check host is this host.
+
+ def _set_origin(self):
+ # |Origin|
+ origin = self._request.headers_in.get(common.ORIGIN_HEADER)
+ if origin is not None:
+ self._request.ws_origin = origin
+
+ def _set_protocol_version(self):
+ # |Sec-WebSocket-Draft|
+ draft = self._request.headers_in.get(common.SEC_WEBSOCKET_DRAFT_HEADER)
+ if draft is not None and draft != '0':
+ raise HandshakeException('Illegal value for %s: %s' %
+ (common.SEC_WEBSOCKET_DRAFT_HEADER,
+ draft))
+
+ self._logger.debug('Protocol version is HyBi 00')
+ self._request.ws_version = common.VERSION_HYBI00
+ self._request.ws_stream = StreamHixie75(self._request, True)
+
+ def _set_challenge_response(self):
+ # 5.2 4-8.
+ self._request.ws_challenge = self._get_challenge()
+ # 5.2 9. let /response/ be the MD5 fingerprint of /challenge/
+ self._request.ws_challenge_md5 = util.md5_hash(
+ self._request.ws_challenge).digest()
+ self._logger.debug(
+ 'Challenge: %r (%s)',
+ self._request.ws_challenge,
+ util.hexify(self._request.ws_challenge))
+ self._logger.debug(
+ 'Challenge response: %r (%s)',
+ self._request.ws_challenge_md5,
+ util.hexify(self._request.ws_challenge_md5))
+
+ def _get_key_value(self, key_field):
+ key_value = get_mandatory_header(self._request, key_field)
+
+ self._logger.debug('%s: %r', key_field, key_value)
+
+ # 5.2 4. let /key-number_n/ be the digits (characters in the range
+ # U+0030 DIGIT ZERO (0) to U+0039 DIGIT NINE (9)) in /key_n/,
+ # interpreted as a base ten integer, ignoring all other characters
+ # in /key_n/.
+ try:
+ key_number = int(re.sub("\\D", "", key_value))
+ except:
+ raise HandshakeException('%s field contains no digit' % key_field)
+ # 5.2 5. let /spaces_n/ be the number of U+0020 SPACE characters
+ # in /key_n/.
+ spaces = re.subn(" ", "", key_value)[1]
+ if spaces == 0:
+ raise HandshakeException('%s field contains no space' % key_field)
+
+ self._logger.debug(
+ '%s: Key-number is %d and number of spaces is %d',
+ key_field, key_number, spaces)
+
+ # 5.2 6. if /key-number_n/ is not an integral multiple of /spaces_n/
+ # then abort the WebSocket connection.
+ if key_number % spaces != 0:
+ raise HandshakeException(
+ '%s: Key-number (%d) is not an integral multiple of spaces '
+ '(%d)' % (key_field, key_number, spaces))
+ # 5.2 7. let /part_n/ be /key-number_n/ divided by /spaces_n/.
+ part = key_number / spaces
+ self._logger.debug('%s: Part is %d', key_field, part)
+ return part
+
+ def _get_challenge(self):
+ # 5.2 4-7.
+ key1 = self._get_key_value(common.SEC_WEBSOCKET_KEY1_HEADER)
+ key2 = self._get_key_value(common.SEC_WEBSOCKET_KEY2_HEADER)
+ # 5.2 8. 
let /challenge/ be the concatenation of /part_1/, + challenge = '' + challenge += struct.pack('!I', key1) # network byteorder int + challenge += struct.pack('!I', key2) # network byteorder int + challenge += self._request.connection.read(8) + return challenge + + def _send_handshake(self): + response = [] + + # 5.2 10. send the following line. + response.append('HTTP/1.1 101 WebSocket Protocol Handshake\r\n') + + # 5.2 11. send the following fields to the client. + response.append(format_header( + common.UPGRADE_HEADER, common.WEBSOCKET_UPGRADE_TYPE_HIXIE75)) + response.append(format_header( + common.CONNECTION_HEADER, common.UPGRADE_CONNECTION_TYPE)) + response.append(format_header( + common.SEC_WEBSOCKET_LOCATION_HEADER, self._request.ws_location)) + response.append(format_header( + common.SEC_WEBSOCKET_ORIGIN_HEADER, self._request.ws_origin)) + if self._request.ws_protocol: + response.append(format_header( + common.SEC_WEBSOCKET_PROTOCOL_HEADER, + self._request.ws_protocol)) + # 5.2 12. send two bytes 0x0D 0x0A. + response.append('\r\n') + # 5.2 13. send /response/ + response.append(self._request.ws_challenge_md5) + + raw_response = ''.join(response) + self._request.connection.write(raw_response) + self._logger.debug('Sent server\'s opening handshake: %r', + raw_response) + + +# vi:sts=4 sw=4 et diff --git a/module/lib/mod_pywebsocket/headerparserhandler.py b/module/lib/mod_pywebsocket/headerparserhandler.py new file mode 100644 index 000000000..2cc62de04 --- /dev/null +++ b/module/lib/mod_pywebsocket/headerparserhandler.py @@ -0,0 +1,244 @@ +# Copyright 2011, Google Inc. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + +"""PythonHeaderParserHandler for mod_pywebsocket. + +Apache HTTP Server and mod_python must be configured such that this +function is called to handle WebSocket request. 
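A minimal configuration sketch (the directory paths are illustrative assumptions, not part of this module):

    PythonPath "sys.path+['/path/to/module/lib']"
    PythonOption mod_pywebsocket.handler_root /var/www/websock_handlers
    PythonHeaderParserHandler mod_pywebsocket.headerparserhandler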
+""" + + +import logging + +from mod_python import apache + +from mod_pywebsocket import common +from mod_pywebsocket import dispatch +from mod_pywebsocket import handshake +from mod_pywebsocket import util + + +# PythonOption to specify the handler root directory. +_PYOPT_HANDLER_ROOT = 'mod_pywebsocket.handler_root' + +# PythonOption to specify the handler scan directory. +# This must be a directory under the root directory. +# The default is the root directory. +_PYOPT_HANDLER_SCAN = 'mod_pywebsocket.handler_scan' + +# PythonOption to allow handlers whose canonical path is +# not under the root directory. It's disallowed by default. +# Set this option with value of 'yes' to allow. +_PYOPT_ALLOW_HANDLERS_OUTSIDE_ROOT = ( + 'mod_pywebsocket.allow_handlers_outside_root_dir') +# Map from values to their meanings. 'Yes' and 'No' are allowed just for +# compatibility. +_PYOPT_ALLOW_HANDLERS_OUTSIDE_ROOT_DEFINITION = { + 'off': False, 'no': False, 'on': True, 'yes': True} + +# (Obsolete option. Ignored.) +# PythonOption to specify to allow handshake defined in Hixie 75 version +# protocol. The default is None (Off) +_PYOPT_ALLOW_DRAFT75 = 'mod_pywebsocket.allow_draft75' +# Map from values to their meanings. +_PYOPT_ALLOW_DRAFT75_DEFINITION = {'off': False, 'on': True} + + +class ApacheLogHandler(logging.Handler): + """Wrapper logging.Handler to emit log message to apache's error.log.""" + + _LEVELS = { + logging.DEBUG: apache.APLOG_DEBUG, + logging.INFO: apache.APLOG_INFO, + logging.WARNING: apache.APLOG_WARNING, + logging.ERROR: apache.APLOG_ERR, + logging.CRITICAL: apache.APLOG_CRIT, + } + + def __init__(self, request=None): + logging.Handler.__init__(self) + self._log_error = apache.log_error + if request is not None: + self._log_error = request.log_error + + # Time and level will be printed by Apache. + self._formatter = logging.Formatter('%(name)s: %(message)s') + + def emit(self, record): + apache_level = apache.APLOG_DEBUG + if record.levelno in ApacheLogHandler._LEVELS: + apache_level = ApacheLogHandler._LEVELS[record.levelno] + + msg = self._formatter.format(record) + + # "server" parameter must be passed to have "level" parameter work. + # If only "level" parameter is passed, nothing shows up on Apache's + # log. However, at this point, we cannot get the server object of the + # virtual host which will process WebSocket requests. The only server + # object we can get here is apache.main_server. But Wherever (server + # configuration context or virtual host context) we put + # PythonHeaderParserHandler directive, apache.main_server just points + # the main server instance (not any of virtual server instance). Then, + # Apache follows LogLevel directive in the server configuration context + # to filter logs. So, we need to specify LogLevel in the server + # configuration context. Even if we specify "LogLevel debug" in the + # virtual host context which actually handles WebSocket connections, + # DEBUG level logs never show up unless "LogLevel debug" is specified + # in the server configuration context. + # + # TODO(tyoshino): Provide logging methods on request object. When + # request is mp_request object (when used together with Apache), the + # methods call request.log_error indirectly. When request is + # _StandaloneRequest, the methods call Python's logging facility which + # we create in standalone.py. 
+ self._log_error(msg, apache_level, apache.main_server) + + +def _configure_logging(): + logger = logging.getLogger() + # Logs are filtered by Apache based on LogLevel directive in Apache + # configuration file. We must just pass logs for all levels to + # ApacheLogHandler. + logger.setLevel(logging.DEBUG) + logger.addHandler(ApacheLogHandler()) + + +_configure_logging() + +_LOGGER = logging.getLogger(__name__) + + +def _parse_option(name, value, definition): + if value is None: + return False + + meaning = definition.get(value.lower()) + if meaning is None: + raise Exception('Invalid value for PythonOption %s: %r' % + (name, value)) + return meaning + + +def _create_dispatcher(): + _LOGGER.info('Initializing Dispatcher') + + options = apache.main_server.get_options() + + handler_root = options.get(_PYOPT_HANDLER_ROOT, None) + if not handler_root: + raise Exception('PythonOption %s is not defined' % _PYOPT_HANDLER_ROOT, + apache.APLOG_ERR) + + handler_scan = options.get(_PYOPT_HANDLER_SCAN, handler_root) + + allow_handlers_outside_root = _parse_option( + _PYOPT_ALLOW_HANDLERS_OUTSIDE_ROOT, + options.get(_PYOPT_ALLOW_HANDLERS_OUTSIDE_ROOT), + _PYOPT_ALLOW_HANDLERS_OUTSIDE_ROOT_DEFINITION) + + dispatcher = dispatch.Dispatcher( + handler_root, handler_scan, allow_handlers_outside_root) + + for warning in dispatcher.source_warnings(): + apache.log_error('mod_pywebsocket: %s' % warning, apache.APLOG_WARNING) + + return dispatcher + + +# Initialize +_dispatcher = _create_dispatcher() + + +def headerparserhandler(request): + """Handle request. + + Args: + request: mod_python request. + + This function is named headerparserhandler because it is the default + name for a PythonHeaderParserHandler. + """ + + handshake_is_done = False + try: + # Fallback to default http handler for request paths for which + # we don't have request handlers. + if not _dispatcher.get_handler_suite(request.uri): + request.log_error('No handler for resource: %r' % request.uri, + apache.APLOG_INFO) + request.log_error('Fallback to Apache', apache.APLOG_INFO) + return apache.DECLINED + except dispatch.DispatchException, e: + request.log_error('mod_pywebsocket: %s' % e, apache.APLOG_INFO) + if not handshake_is_done: + return e.status + + try: + allow_draft75 = _parse_option( + _PYOPT_ALLOW_DRAFT75, + apache.main_server.get_options().get(_PYOPT_ALLOW_DRAFT75), + _PYOPT_ALLOW_DRAFT75_DEFINITION) + + try: + handshake.do_handshake( + request, _dispatcher, allowDraft75=allow_draft75) + except handshake.VersionException, e: + request.log_error('mod_pywebsocket: %s' % e, apache.APLOG_INFO) + request.err_headers_out.add(common.SEC_WEBSOCKET_VERSION_HEADER, + e.supported_versions) + return apache.HTTP_BAD_REQUEST + except handshake.HandshakeException, e: + # Handshake for ws/wss failed. + # Send http response with error status. + request.log_error('mod_pywebsocket: %s' % e, apache.APLOG_INFO) + return e.status + + handshake_is_done = True + request._dispatcher = _dispatcher + _dispatcher.transfer_data(request) + except handshake.AbortedByUserException, e: + request.log_error('mod_pywebsocket: %s' % e, apache.APLOG_INFO) + except Exception, e: + # DispatchException can also be thrown if something is wrong in + # pywebsocket code. It's caught here, then. + + request.log_error('mod_pywebsocket: %s\n%s' % + (e, util.get_stack_trace()), + apache.APLOG_ERR) + # Unknown exceptions before handshake mean Apache must handle its + # request with another handler. 
+ if not handshake_is_done:
+ return apache.DECLINED
+ # Set assbackwards to suppress response header generation by Apache.
+ request.assbackwards = 1
+ return apache.DONE # Return DONE such that no other handlers are invoked.
+
+
+# vi:sts=4 sw=4 et
diff --git a/module/lib/mod_pywebsocket/http_header_util.py b/module/lib/mod_pywebsocket/http_header_util.py
new file mode 100644
index 000000000..b77465393
--- /dev/null
+++ b/module/lib/mod_pywebsocket/http_header_util.py
@@ -0,0 +1,263 @@
+# Copyright 2011, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+"""Utilities for parsing and formatting headers that follow the grammar defined
+in HTTP RFC http://www.ietf.org/rfc/rfc2616.txt.
+"""
+
+
+import urlparse
+
+
+_SEPARATORS = '()<>@,;:\\"/[]?={} \t'
+
+
+def _is_char(c):
+ """Returns true iff c is in CHAR as specified in HTTP RFC."""
+
+ return ord(c) <= 127
+
+
+def _is_ctl(c):
+ """Returns true iff c is in CTL as specified in HTTP RFC."""
+
+ return ord(c) <= 31 or ord(c) == 127
+
+
+class ParsingState(object):
+
+ def __init__(self, data):
+ self.data = data
+ self.head = 0
+
+
+def peek(state, pos=0):
+ """Peeks the character at pos from the head of data."""
+
+ if state.head + pos >= len(state.data):
+ return None
+
+ return state.data[state.head + pos]
+
+
+def consume(state, amount=1):
+ """Consumes the specified amount of bytes from the head and returns the
+ consumed bytes. If there are not enough bytes to consume, returns None.
+ """
+
+ if state.head + amount > len(state.data):
+ return None
+
+ result = state.data[state.head:state.head + amount]
+ state.head = state.head + amount
+ return result
+
+
+def consume_string(state, expected):
+ """Given a parsing state and an expected string, consumes the string from
+ the head. Returns True if consumed successfully. Otherwise, returns
+ False.
+ """
+
+ pos = 0
+
+ for c in expected:
+ if c != peek(state, pos):
+ return False
+ pos += 1
+
+ consume(state, pos)
+ return True
+
+
+def consume_lws(state):
+ """Consumes an LWS from the head. Returns True if any LWS is consumed. 
+ Otherwise, returns False. + + LWS = [CRLF] 1*( SP | HT ) + """ + + original_head = state.head + + consume_string(state, '\r\n') + + pos = 0 + + while True: + c = peek(state, pos) + if c == ' ' or c == '\t': + pos += 1 + else: + if pos == 0: + state.head = original_head + return False + else: + consume(state, pos) + return True + + +def consume_lwses(state): + """Consumes *LWS from the head.""" + + while consume_lws(state): + pass + + +def consume_token(state): + """Consumes a token from the head. Returns the token or None if no token + was found. + """ + + pos = 0 + + while True: + c = peek(state, pos) + if c is None or c in _SEPARATORS or _is_ctl(c) or not _is_char(c): + if pos == 0: + return None + + return consume(state, pos) + else: + pos += 1 + + +def consume_token_or_quoted_string(state): + """Consumes a token or a quoted-string, and returns the token or unquoted + string. If no token or quoted-string was found, returns None. + """ + + original_head = state.head + + if not consume_string(state, '"'): + return consume_token(state) + + result = [] + + expect_quoted_pair = False + + while True: + if not expect_quoted_pair and consume_lws(state): + result.append(' ') + continue + + c = consume(state) + if c is None: + # quoted-string is not enclosed with double quotation + state.head = original_head + return None + elif expect_quoted_pair: + expect_quoted_pair = False + if _is_char(c): + result.append(c) + else: + # Non CHAR character found in quoted-pair + state.head = original_head + return None + elif c == '\\': + expect_quoted_pair = True + elif c == '"': + return ''.join(result) + elif _is_ctl(c): + # Invalid character %r found in qdtext + state.head = original_head + return None + else: + result.append(c) + + +def quote_if_necessary(s): + """Quotes arbitrary string into quoted-string.""" + + quote = False + if s == '': + return '""' + + result = [] + for c in s: + if c == '"' or c in _SEPARATORS or _is_ctl(c) or not _is_char(c): + quote = True + + if c == '"' or _is_ctl(c): + result.append('\\' + c) + else: + result.append(c) + + if quote: + return '"' + ''.join(result) + '"' + else: + return ''.join(result) + + +def parse_uri(uri): + """Parse absolute URI then return host, port and resource.""" + + parsed = urlparse.urlsplit(uri) + if parsed.scheme != 'wss' and parsed.scheme != 'ws': + # |uri| must be a relative URI. + # TODO(toyoshim): Should validate |uri|. + return None, None, uri + + if parsed.hostname is None: + return None, None, None + + port = None + try: + port = parsed.port + except ValueError, e: + # port property cause ValueError on invalid null port description like + # 'ws://host:/path'. + return None, None, None + + if port is None: + if parsed.scheme == 'ws': + port = 80 + else: + port = 443 + + path = parsed.path + if not path: + path += '/' + if parsed.query: + path += '?' + parsed.query + if parsed.fragment: + path += '#' + parsed.fragment + + return parsed.hostname, port, path + + +try: + urlparse.uses_netloc.index('ws') +except ValueError, e: + # urlparse in Python2.5.1 doesn't have 'ws' and 'wss' entries. + urlparse.uses_netloc.append('ws') + urlparse.uses_netloc.append('wss') + + +# vi:sts=4 sw=4 et diff --git a/module/lib/mod_pywebsocket/memorizingfile.py b/module/lib/mod_pywebsocket/memorizingfile.py new file mode 100644 index 000000000..4d4cd9585 --- /dev/null +++ b/module/lib/mod_pywebsocket/memorizingfile.py @@ -0,0 +1,99 @@ +#!/usr/bin/env python +# +# Copyright 2011, Google Inc. +# All rights reserved. 
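As an aside on parse_uri() in the file above, its behavior can be summarized with a few illustrative calls (inputs assumed; return values derived from the code, not stated in the patch):

    parse_uri('ws://example.com/chat?room=1')  # ('example.com', 80, '/chat?room=1')
    parse_uri('wss://example.com:8443/')       # ('example.com', 8443, '/')
    parse_uri('/relative/path')                # (None, None, '/relative/path')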
+# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + +"""Memorizing file. + +A memorizing file wraps a file and memorizes lines read by readline. +""" + + +import sys + + +class MemorizingFile(object): + """MemorizingFile wraps a file and memorizes lines read by readline. + + Note that data read by other methods are not memorized. This behavior + is good enough for memorizing lines SimpleHTTPServer reads before + the control reaches WebSocketRequestHandler. + """ + + def __init__(self, file_, max_memorized_lines=sys.maxint): + """Construct an instance. + + Args: + file_: the file object to wrap. + max_memorized_lines: the maximum number of lines to memorize. + Only the first max_memorized_lines are memorized. + Default: sys.maxint. + """ + + self._file = file_ + self._memorized_lines = [] + self._max_memorized_lines = max_memorized_lines + self._buffered = False + self._buffered_line = None + + def __getattribute__(self, name): + if name in ('_file', '_memorized_lines', '_max_memorized_lines', + '_buffered', '_buffered_line', 'readline', + 'get_memorized_lines'): + return object.__getattribute__(self, name) + return self._file.__getattribute__(name) + + def readline(self, size=-1): + """Override file.readline and memorize the line read. + + Note that even if size is specified and smaller than actual size, + the whole line will be read out from underlying file object by + subsequent readline calls. 
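Illustrative usage of this class (a sketch, not from the original module; raw_file stands for any file-like object):

    memorizing_file = MemorizingFile(raw_file, max_memorized_lines=1024)
    request_line = memorizing_file.readline()
    first_lines = memorizing_file.get_memorized_lines()  # [request_line]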
+ """ + + if self._buffered: + line = self._buffered_line + self._buffered = False + else: + line = self._file.readline() + if line and len(self._memorized_lines) < self._max_memorized_lines: + self._memorized_lines.append(line) + if size >= 0 and size < len(line): + self._buffered = True + self._buffered_line = line[size:] + return line[:size] + return line + + def get_memorized_lines(self): + """Get lines memorized so far.""" + return self._memorized_lines + + +# vi:sts=4 sw=4 et diff --git a/module/lib/mod_pywebsocket/msgutil.py b/module/lib/mod_pywebsocket/msgutil.py new file mode 100644 index 000000000..4c1a0114b --- /dev/null +++ b/module/lib/mod_pywebsocket/msgutil.py @@ -0,0 +1,219 @@ +# Copyright 2011, Google Inc. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + +"""Message related utilities. + +Note: request.connection.write/read are used in this module, even though +mod_python document says that they should be used only in connection +handlers. Unfortunately, we have no other options. For example, +request.write/read are not suitable because they don't allow direct raw +bytes writing/reading. +""" + + +import Queue +import threading + + +# Export Exception symbols from msgutil for backward compatibility +from mod_pywebsocket._stream_base import ConnectionTerminatedException +from mod_pywebsocket._stream_base import InvalidFrameException +from mod_pywebsocket._stream_base import BadOperationException +from mod_pywebsocket._stream_base import UnsupportedFrameException + + +# An API for handler to send/receive WebSocket messages. +def close_connection(request): + """Close connection. + + Args: + request: mod_python request. + """ + request.ws_stream.close_connection() + + +def send_message(request, payload_data, end=True, binary=False): + """Send a message (or part of a message). + + Args: + request: mod_python request. + payload_data: unicode text or str binary to send. + end: True to terminate a message. + False to send payload_data as part of a message that is to be + terminated by next or later send_message call with end=True. 
+ binary: send payload_data as binary frame(s).
+ Raises:
+ BadOperationException: when the server has already terminated.
+ """
+ request.ws_stream.send_message(payload_data, end, binary)
+
+
+def receive_message(request):
+ """Receive a WebSocket frame and return its payload as text in
+ unicode or binary in str.
+
+ Args:
+ request: mod_python request.
+ Raises:
+ InvalidFrameException: when the client sends an invalid frame.
+ UnsupportedFrameException: when the client sends an unsupported
+ frame, e.g. one with a reserved bit set
+ that no extension can recognize.
+ InvalidUTF8Exception: when the client sends a text frame containing
+ an invalid UTF-8 string.
+ ConnectionTerminatedException: when the connection is closed
+ unexpectedly.
+ BadOperationException: when the client has already terminated.
+ """
+ return request.ws_stream.receive_message()
+
+
+def send_ping(request, body=''):
+ request.ws_stream.send_ping(body)
+
+
+class MessageReceiver(threading.Thread):
+ """This class receives messages from the client.
+
+ This class provides three ways to receive messages: blocking,
+ non-blocking, and via callback. Callback has the highest precedence.
+
+ Note: This class should not be used with the standalone server for wss
+ because pyOpenSSL used by the server raises a fatal error if the socket
+ is accessed from multiple threads.
+ """
+
+ def __init__(self, request, onmessage=None):
+ """Construct an instance.
+
+ Args:
+ request: mod_python request.
+ onmessage: a function to be called when a message is received.
+ May be None. If not None, the function is called on
+ another thread. In that case, MessageReceiver.receive
+ and MessageReceiver.receive_nowait are useless
+ because they will never return any messages.
+ """
+
+ threading.Thread.__init__(self)
+ self._request = request
+ self._queue = Queue.Queue()
+ self._onmessage = onmessage
+ self._stop_requested = False
+ self.setDaemon(True)
+ self.start()
+
+ def run(self):
+ try:
+ while not self._stop_requested:
+ message = receive_message(self._request)
+ if self._onmessage:
+ self._onmessage(message)
+ else:
+ self._queue.put(message)
+ finally:
+ close_connection(self._request)
+
+ def receive(self):
+ """Receive a message from the channel, blocking.
+
+ Returns:
+ message as a unicode string.
+ """
+ return self._queue.get()
+
+ def receive_nowait(self):
+ """Receive a message from the channel, non-blocking.
+
+ Returns:
+ message as a unicode string if available. None otherwise.
+ """
+ try:
+ message = self._queue.get_nowait()
+ except Queue.Empty:
+ message = None
+ return message
+
+ def stop(self):
+ """Request to stop this instance.
+
+ The instance will be stopped after receiving the next message.
+ This method may not be very useful, but there is no clean way
+ in Python to forcefully stop a running thread.
+ """
+ self._stop_requested = True
+
+
+class MessageSender(threading.Thread):
+ """This class sends messages to the client.
+
+ This class provides both synchronous and asynchronous ways to send
+ messages.
+
+ Note: This class should not be used with the standalone server for wss
+ because pyOpenSSL used by the server raises a fatal error if the socket
+ is accessed from multiple threads.
+ """
+
+ def __init__(self, request):
+ """Construct an instance.
+
+ Args:
+ request: mod_python request. 
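Illustrative usage (a sketch, not from the original module):

    sender = MessageSender(request)
    sender.send(u'hello')         # blocks until the message is written
    sender.send_nowait(u'world')  # queues the message and returns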
+ """ + threading.Thread.__init__(self) + self._request = request + self._queue = Queue.Queue() + self.setDaemon(True) + self.start() + + def run(self): + while True: + message, condition = self._queue.get() + condition.acquire() + send_message(self._request, message) + condition.notify() + condition.release() + + def send(self, message): + """Send a message, blocking.""" + + condition = threading.Condition() + condition.acquire() + self._queue.put((message, condition)) + condition.wait() + + def send_nowait(self, message): + """Send a message, non-blocking.""" + + self._queue.put((message, threading.Condition())) + + +# vi:sts=4 sw=4 et diff --git a/module/lib/mod_pywebsocket/mux.py b/module/lib/mod_pywebsocket/mux.py new file mode 100644 index 000000000..f0bdd2461 --- /dev/null +++ b/module/lib/mod_pywebsocket/mux.py @@ -0,0 +1,1636 @@ +# Copyright 2012, Google Inc. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + +"""This file provides classes and helper functions for multiplexing extension. 
+ +Specification: +http://tools.ietf.org/html/draft-ietf-hybi-websocket-multiplexing-06 +""" + + +import collections +import copy +import email +import email.parser +import logging +import math +import struct +import threading +import traceback + +from mod_pywebsocket import common +from mod_pywebsocket import handshake +from mod_pywebsocket import util +from mod_pywebsocket._stream_base import BadOperationException +from mod_pywebsocket._stream_base import ConnectionTerminatedException +from mod_pywebsocket._stream_hybi import Frame +from mod_pywebsocket._stream_hybi import Stream +from mod_pywebsocket._stream_hybi import StreamOptions +from mod_pywebsocket._stream_hybi import create_binary_frame +from mod_pywebsocket._stream_hybi import create_closing_handshake_body +from mod_pywebsocket._stream_hybi import create_header +from mod_pywebsocket._stream_hybi import create_length_header +from mod_pywebsocket._stream_hybi import parse_frame +from mod_pywebsocket.handshake import hybi + + +_CONTROL_CHANNEL_ID = 0 +_DEFAULT_CHANNEL_ID = 1 + +_MUX_OPCODE_ADD_CHANNEL_REQUEST = 0 +_MUX_OPCODE_ADD_CHANNEL_RESPONSE = 1 +_MUX_OPCODE_FLOW_CONTROL = 2 +_MUX_OPCODE_DROP_CHANNEL = 3 +_MUX_OPCODE_NEW_CHANNEL_SLOT = 4 + +_MAX_CHANNEL_ID = 2 ** 29 - 1 + +_INITIAL_NUMBER_OF_CHANNEL_SLOTS = 64 +_INITIAL_QUOTA_FOR_CLIENT = 8 * 1024 + +_HANDSHAKE_ENCODING_IDENTITY = 0 +_HANDSHAKE_ENCODING_DELTA = 1 + +# We need only these status code for now. +_HTTP_BAD_RESPONSE_MESSAGES = { + common.HTTP_STATUS_BAD_REQUEST: 'Bad Request', +} + +# DropChannel reason code +# TODO(bashi): Define all reason code defined in -05 draft. +_DROP_CODE_NORMAL_CLOSURE = 1000 + +_DROP_CODE_INVALID_ENCAPSULATING_MESSAGE = 2001 +_DROP_CODE_CHANNEL_ID_TRUNCATED = 2002 +_DROP_CODE_ENCAPSULATED_FRAME_IS_TRUNCATED = 2003 +_DROP_CODE_UNKNOWN_MUX_OPCODE = 2004 +_DROP_CODE_INVALID_MUX_CONTROL_BLOCK = 2005 +_DROP_CODE_CHANNEL_ALREADY_EXISTS = 2006 +_DROP_CODE_NEW_CHANNEL_SLOT_VIOLATION = 2007 + +_DROP_CODE_UNKNOWN_REQUEST_ENCODING = 3002 +_DROP_CODE_SEND_QUOTA_VIOLATION = 3005 +_DROP_CODE_ACKNOWLEDGED = 3008 + + +class MuxUnexpectedException(Exception): + """Exception in handling multiplexing extension.""" + pass + + +# Temporary +class MuxNotImplementedException(Exception): + """Raised when a flow enters unimplemented code path.""" + pass + + +class LogicalConnectionClosedException(Exception): + """Raised when logical connection is gracefully closed.""" + pass + + +class PhysicalConnectionError(Exception): + """Raised when there is a physical connection error.""" + def __init__(self, drop_code, message=''): + super(PhysicalConnectionError, self).__init__( + 'code=%d, message=%r' % (drop_code, message)) + self.drop_code = drop_code + self.message = message + + +class LogicalChannelError(Exception): + """Raised when there is a logical channel error.""" + def __init__(self, channel_id, drop_code, message=''): + super(LogicalChannelError, self).__init__( + 'channel_id=%d, code=%d, message=%r' % ( + channel_id, drop_code, message)) + self.channel_id = channel_id + self.drop_code = drop_code + self.message = message + + +def _encode_channel_id(channel_id): + if channel_id < 0: + raise ValueError('Channel id %d must not be negative' % channel_id) + + if channel_id < 2 ** 7: + return chr(channel_id) + if channel_id < 2 ** 14: + return struct.pack('!H', 0x8000 + channel_id) + if channel_id < 2 ** 21: + first = chr(0xc0 + (channel_id >> 16)) + return first + struct.pack('!H', channel_id & 0xffff) + if channel_id < 2 ** 29: + return struct.pack('!L', 
0xe0000000 + channel_id) + + raise ValueError('Channel id %d is too large' % channel_id) + + +def _encode_number(number): + return create_length_header(number, False) + + +def _create_add_channel_response(channel_id, encoded_handshake, + encoding=0, rejected=False, + outer_frame_mask=False): + if encoding != 0 and encoding != 1: + raise ValueError('Invalid encoding %d' % encoding) + + first_byte = ((_MUX_OPCODE_ADD_CHANNEL_RESPONSE << 5) | + (rejected << 4) | encoding) + block = (chr(first_byte) + + _encode_channel_id(channel_id) + + _encode_number(len(encoded_handshake)) + + encoded_handshake) + payload = _encode_channel_id(_CONTROL_CHANNEL_ID) + block + return create_binary_frame(payload, mask=outer_frame_mask) + + +def _create_drop_channel(channel_id, code=None, message='', + outer_frame_mask=False): + if len(message) > 0 and code is None: + raise ValueError('Code must be specified if message is specified') + + first_byte = _MUX_OPCODE_DROP_CHANNEL << 5 + block = chr(first_byte) + _encode_channel_id(channel_id) + if code is None: + block += _encode_number(0) # Reason size + else: + reason = struct.pack('!H', code) + message + reason_size = _encode_number(len(reason)) + block += reason_size + reason + + payload = _encode_channel_id(_CONTROL_CHANNEL_ID) + block + return create_binary_frame(payload, mask=outer_frame_mask) + + +def _create_flow_control(channel_id, replenished_quota, + outer_frame_mask=False): + first_byte = _MUX_OPCODE_FLOW_CONTROL << 5 + block = (chr(first_byte) + + _encode_channel_id(channel_id) + + _encode_number(replenished_quota)) + payload = _encode_channel_id(_CONTROL_CHANNEL_ID) + block + return create_binary_frame(payload, mask=outer_frame_mask) + + +def _create_new_channel_slot(slots, send_quota, outer_frame_mask=False): + if slots < 0 or send_quota < 0: + raise ValueError('slots and send_quota must be non-negative.') + first_byte = _MUX_OPCODE_NEW_CHANNEL_SLOT << 5 + block = (chr(first_byte) + + _encode_number(slots) + + _encode_number(send_quota)) + payload = _encode_channel_id(_CONTROL_CHANNEL_ID) + block + return create_binary_frame(payload, mask=outer_frame_mask) + + +def _create_fallback_new_channel_slot(outer_frame_mask=False): + first_byte = (_MUX_OPCODE_NEW_CHANNEL_SLOT << 5) | 1 # Set the F flag + block = (chr(first_byte) + _encode_number(0) + _encode_number(0)) + payload = _encode_channel_id(_CONTROL_CHANNEL_ID) + block + return create_binary_frame(payload, mask=outer_frame_mask) + + +def _parse_request_text(request_text): + request_line, header_lines = request_text.split('\r\n', 1) + + words = request_line.split(' ') + if len(words) != 3: + raise ValueError('Bad Request-Line syntax %r' % request_line) + [command, path, version] = words + if version != 'HTTP/1.1': + raise ValueError('Bad request version %r' % version) + + # email.parser.Parser() parses RFC 2822 (RFC 822) style headers. + # RFC 6455 refers RFC 2616 for handshake parsing, and RFC 2616 refers + # RFC 822. + headers = email.parser.Parser().parsestr(header_lines) + return command, path, version, headers + + +class _ControlBlock(object): + """A structure that holds parsing result of multiplexing control block. + Control block specific attributes will be added by _MuxFramePayloadParser. + (e.g. 
encoded_handshake will be added for AddChannelRequest and + AddChannelResponse) + """ + + def __init__(self, opcode): + self.opcode = opcode + + +class _MuxFramePayloadParser(object): + """A class that parses multiplexed frame payload.""" + + def __init__(self, payload): + self._data = payload + self._read_position = 0 + self._logger = util.get_class_logger(self) + + def read_channel_id(self): + """Reads channel id. + + Raises: + ValueError: when the payload doesn't contain + valid channel id. + """ + + remaining_length = len(self._data) - self._read_position + pos = self._read_position + if remaining_length == 0: + raise ValueError('Invalid channel id format') + + channel_id = ord(self._data[pos]) + channel_id_length = 1 + if channel_id & 0xe0 == 0xe0: + if remaining_length < 4: + raise ValueError('Invalid channel id format') + channel_id = struct.unpack('!L', + self._data[pos:pos+4])[0] & 0x1fffffff + channel_id_length = 4 + elif channel_id & 0xc0 == 0xc0: + if remaining_length < 3: + raise ValueError('Invalid channel id format') + channel_id = (((channel_id & 0x1f) << 16) + + struct.unpack('!H', self._data[pos+1:pos+3])[0]) + channel_id_length = 3 + elif channel_id & 0x80 == 0x80: + if remaining_length < 2: + raise ValueError('Invalid channel id format') + channel_id = struct.unpack('!H', + self._data[pos:pos+2])[0] & 0x3fff + channel_id_length = 2 + self._read_position += channel_id_length + + return channel_id + + def read_inner_frame(self): + """Reads an inner frame. + + Raises: + PhysicalConnectionError: when the inner frame is invalid. + """ + + if len(self._data) == self._read_position: + raise PhysicalConnectionError( + _DROP_CODE_ENCAPSULATED_FRAME_IS_TRUNCATED) + + bits = ord(self._data[self._read_position]) + self._read_position += 1 + fin = (bits & 0x80) == 0x80 + rsv1 = (bits & 0x40) == 0x40 + rsv2 = (bits & 0x20) == 0x20 + rsv3 = (bits & 0x10) == 0x10 + opcode = bits & 0xf + payload = self.remaining_data() + # Consume rest of the message which is payload data of the original + # frame. 
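As an illustrative decoding of the byte read above (an example, not stated in the patch): bits == 0x81 yields fin=True, rsv1/rsv2/rsv3 all False and opcode 0x1 (a text frame); the rest of the mux payload is then taken as the inner frame's payload.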
+ self._read_position = len(self._data) + return fin, rsv1, rsv2, rsv3, opcode, payload + + def _read_number(self): + if self._read_position + 1 > len(self._data): + raise PhysicalConnectionError( + _DROP_CODE_INVALID_MUX_CONTROL_BLOCK, + 'Cannot read the first byte of number field') + + number = ord(self._data[self._read_position]) + if number & 0x80 == 0x80: + raise PhysicalConnectionError( + _DROP_CODE_INVALID_MUX_CONTROL_BLOCK, + 'The most significant bit of the first byte of number should ' + 'be unset') + self._read_position += 1 + pos = self._read_position + if number == 127: + if pos + 8 > len(self._data): + raise PhysicalConnectionError( + _DROP_CODE_INVALID_MUX_CONTROL_BLOCK, + 'Invalid number field') + self._read_position += 8 + number = struct.unpack('!Q', self._data[pos:pos+8])[0] + if number > 0x7FFFFFFFFFFFFFFF: + raise PhysicalConnectionError( + _DROP_CODE_INVALID_MUX_CONTROL_BLOCK, + 'Encoded number >= 2^63') + if number <= 0xFFFF: + raise PhysicalConnectionError( + _DROP_CODE_INVALID_MUX_CONTROL_BLOCK, + '%d should not be encoded by 9 bytes encoding' % number) + return number + if number == 126: + if pos + 2 > len(self._data): + raise PhysicalConnectionError( + _DROP_CODE_INVALID_MUX_CONTROL_BLOCK, + 'Invalid number field') + self._read_position += 2 + number = struct.unpack('!H', self._data[pos:pos+2])[0] + if number <= 125: + raise PhysicalConnectionError( + _DROP_CODE_INVALID_MUX_CONTROL_BLOCK, + '%d should not be encoded by 3 bytes encoding' % number) + return number + + def _read_size_and_contents(self): + """Reads data that consists of followings: + - the size of the contents encoded the same way as payload length + of the WebSocket Protocol with 1 bit padding at the head. + - the contents. + """ + + size = self._read_number() + pos = self._read_position + if pos + size > len(self._data): + raise PhysicalConnectionError( + _DROP_CODE_INVALID_MUX_CONTROL_BLOCK, + 'Cannot read %d bytes data' % size) + + self._read_position += size + return self._data[pos:pos+size] + + def _read_add_channel_request(self, first_byte, control_block): + reserved = (first_byte >> 2) & 0x7 + if reserved != 0: + raise PhysicalConnectionError( + _DROP_CODE_INVALID_MUX_CONTROL_BLOCK, + 'Reserved bits must be unset') + + # Invalid encoding will be handled by MuxHandler. 
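For orientation (derived from the bit operations in this method, not stated separately in the patch): the AddChannelRequest first byte packs the opcode in bits 7-5, the reserved field in bits 4-2 (checked above), and the handshake encoding in bits 1-0, identity (0) or delta (1).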
+ encoding = first_byte & 0x3
+ try:
+ control_block.channel_id = self.read_channel_id()
+ except ValueError, e:
+ raise PhysicalConnectionError(_DROP_CODE_INVALID_MUX_CONTROL_BLOCK)
+ control_block.encoding = encoding
+ encoded_handshake = self._read_size_and_contents()
+ control_block.encoded_handshake = encoded_handshake
+ return control_block
+
+ def _read_add_channel_response(self, first_byte, control_block):
+ reserved = (first_byte >> 2) & 0x3
+ if reserved != 0:
+ raise PhysicalConnectionError(
+ _DROP_CODE_INVALID_MUX_CONTROL_BLOCK,
+ 'Reserved bits must be unset')
+
+ control_block.accepted = (first_byte >> 4) & 1
+ control_block.encoding = first_byte & 0x3
+ try:
+ control_block.channel_id = self.read_channel_id()
+ except ValueError, e:
+ raise PhysicalConnectionError(_DROP_CODE_INVALID_MUX_CONTROL_BLOCK)
+ control_block.encoded_handshake = self._read_size_and_contents()
+ return control_block
+
+ def _read_flow_control(self, first_byte, control_block):
+ reserved = first_byte & 0x1f
+ if reserved != 0:
+ raise PhysicalConnectionError(
+ _DROP_CODE_INVALID_MUX_CONTROL_BLOCK,
+ 'Reserved bits must be unset')
+
+ try:
+ control_block.channel_id = self.read_channel_id()
+ except ValueError, e:
+ raise PhysicalConnectionError(_DROP_CODE_INVALID_MUX_CONTROL_BLOCK)
+ control_block.send_quota = self._read_number()
+ return control_block
+
+ def _read_drop_channel(self, first_byte, control_block):
+ reserved = first_byte & 0x1f
+ if reserved != 0:
+ raise PhysicalConnectionError(
+ _DROP_CODE_INVALID_MUX_CONTROL_BLOCK,
+ 'Reserved bits must be unset')
+
+ try:
+ control_block.channel_id = self.read_channel_id()
+ except ValueError, e:
+ raise PhysicalConnectionError(_DROP_CODE_INVALID_MUX_CONTROL_BLOCK)
+ reason = self._read_size_and_contents()
+ if len(reason) == 0:
+ control_block.drop_code = None
+ control_block.drop_message = ''
+ elif len(reason) >= 2:
+ control_block.drop_code = struct.unpack('!H', reason[:2])[0]
+ control_block.drop_message = reason[2:]
+ else:
+ raise PhysicalConnectionError(
+ _DROP_CODE_INVALID_MUX_CONTROL_BLOCK,
+ 'Received DropChannel that contains only 1-byte reason')
+ return control_block
+
+ def _read_new_channel_slot(self, first_byte, control_block):
+ reserved = first_byte & 0x1e
+ if reserved != 0:
+ raise PhysicalConnectionError(
+ _DROP_CODE_INVALID_MUX_CONTROL_BLOCK,
+ 'Reserved bits must be unset')
+ control_block.fallback = first_byte & 1
+ control_block.slots = self._read_number()
+ control_block.send_quota = self._read_number()
+ return control_block
+
+ def read_control_blocks(self):
+ """Reads control block(s).
+
+ Raises:
+ PhysicalConnectionError: when the payload contains invalid control
+ block(s).
+ StopIteration: when no control blocks left. 
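Illustrative iteration (a sketch; handle_flow_control is a hypothetical callback, not part of this module):

    parser = _MuxFramePayloadParser(payload)
    for block in parser.read_control_blocks():
        if block.opcode == _MUX_OPCODE_FLOW_CONTROL:
            handle_flow_control(block.channel_id, block.send_quota)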
+ """ + + while self._read_position < len(self._data): + first_byte = ord(self._data[self._read_position]) + self._read_position += 1 + opcode = (first_byte >> 5) & 0x7 + control_block = _ControlBlock(opcode=opcode) + if opcode == _MUX_OPCODE_ADD_CHANNEL_REQUEST: + yield self._read_add_channel_request(first_byte, control_block) + elif opcode == _MUX_OPCODE_ADD_CHANNEL_RESPONSE: + yield self._read_add_channel_response( + first_byte, control_block) + elif opcode == _MUX_OPCODE_FLOW_CONTROL: + yield self._read_flow_control(first_byte, control_block) + elif opcode == _MUX_OPCODE_DROP_CHANNEL: + yield self._read_drop_channel(first_byte, control_block) + elif opcode == _MUX_OPCODE_NEW_CHANNEL_SLOT: + yield self._read_new_channel_slot(first_byte, control_block) + else: + raise PhysicalConnectionError( + _DROP_CODE_UNKNOWN_MUX_OPCODE, + 'Invalid opcode %d' % opcode) + + assert self._read_position == len(self._data) + raise StopIteration + + def remaining_data(self): + """Returns remaining data.""" + + return self._data[self._read_position:] + + +class _LogicalRequest(object): + """Mimics mod_python request.""" + + def __init__(self, channel_id, command, path, protocol, headers, + connection): + """Constructs an instance. + + Args: + channel_id: the channel id of the logical channel. + command: HTTP request command. + path: HTTP request path. + headers: HTTP headers. + connection: _LogicalConnection instance. + """ + + self.channel_id = channel_id + self.method = command + self.uri = path + self.protocol = protocol + self.headers_in = headers + self.connection = connection + self.server_terminated = False + self.client_terminated = False + + def is_https(self): + """Mimics request.is_https(). Returns False because this method is + used only by old protocols (hixie and hybi00). + """ + + return False + + +class _LogicalConnection(object): + """Mimics mod_python mp_conn.""" + + # For details, see the comment of set_read_state(). + STATE_ACTIVE = 1 + STATE_GRACEFULLY_CLOSED = 2 + STATE_TERMINATED = 3 + + def __init__(self, mux_handler, channel_id): + """Constructs an instance. + + Args: + mux_handler: _MuxHandler instance. + channel_id: channel id of this connection. + """ + + self._mux_handler = mux_handler + self._channel_id = channel_id + self._incoming_data = '' + self._write_condition = threading.Condition() + self._waiting_write_completion = False + self._read_condition = threading.Condition() + self._read_state = self.STATE_ACTIVE + + def get_local_addr(self): + """Getter to mimic mp_conn.local_addr.""" + + return self._mux_handler.physical_connection.get_local_addr() + local_addr = property(get_local_addr) + + def get_remote_addr(self): + """Getter to mimic mp_conn.remote_addr.""" + + return self._mux_handler.physical_connection.get_remote_addr() + remote_addr = property(get_remote_addr) + + def get_memorized_lines(self): + """Gets memorized lines. Not supported.""" + + raise MuxUnexpectedException('_LogicalConnection does not support ' + 'get_memorized_lines') + + def write(self, data): + """Writes data. mux_handler sends data asynchronously. The caller will + be suspended until write done. + + Args: + data: data to be written. + + Raises: + MuxUnexpectedException: when called before finishing the previous + write. 
+ """ + + try: + self._write_condition.acquire() + if self._waiting_write_completion: + raise MuxUnexpectedException( + 'Logical connection %d is already waiting the completion ' + 'of write' % self._channel_id) + + self._waiting_write_completion = True + self._mux_handler.send_data(self._channel_id, data) + self._write_condition.wait() + finally: + self._write_condition.release() + + def write_control_data(self, data): + """Writes data via the control channel. Don't wait finishing write + because this method can be called by mux dispatcher. + + Args: + data: data to be written. + """ + + self._mux_handler.send_control_data(data) + + def notify_write_done(self): + """Called when sending data is completed.""" + + try: + self._write_condition.acquire() + if not self._waiting_write_completion: + raise MuxUnexpectedException( + 'Invalid call of notify_write_done for logical connection' + ' %d' % self._channel_id) + self._waiting_write_completion = False + self._write_condition.notify() + finally: + self._write_condition.release() + + def append_frame_data(self, frame_data): + """Appends incoming frame data. Called when mux_handler dispatches + frame data to the corresponding application. + + Args: + frame_data: incoming frame data. + """ + + self._read_condition.acquire() + self._incoming_data += frame_data + self._read_condition.notify() + self._read_condition.release() + + def read(self, length): + """Reads data. Blocks until enough data has arrived via physical + connection. + + Args: + length: length of data to be read. + Raises: + LogicalConnectionClosedException: when closing handshake for this + logical channel has been received. + ConnectionTerminatedException: when the physical connection has + closed, or an error is caused on the reader thread. + """ + + self._read_condition.acquire() + while (self._read_state == self.STATE_ACTIVE and + len(self._incoming_data) < length): + self._read_condition.wait() + + try: + if self._read_state == self.STATE_GRACEFULLY_CLOSED: + raise LogicalConnectionClosedException( + 'Logical channel %d has closed.' % self._channel_id) + elif self._read_state == self.STATE_TERMINATED: + raise ConnectionTerminatedException( + 'Receiving %d byte failed. Logical channel (%d) closed' % + (length, self._channel_id)) + + value = self._incoming_data[:length] + self._incoming_data = self._incoming_data[length:] + finally: + self._read_condition.release() + + return value + + def set_read_state(self, new_state): + """Sets the state of this connection. Called when an event for this + connection has occurred. + + Args: + new_state: state to be set. new_state must be one of followings: + - STATE_GRACEFULLY_CLOSED: when closing handshake for this + connection has been received. + - STATE_TERMINATED: when the physical connection has closed or + DropChannel of this connection has received. + """ + + self._read_condition.acquire() + self._read_state = new_state + self._read_condition.notify() + self._read_condition.release() + + +class _LogicalStream(Stream): + """Mimics the Stream class. This class interprets multiplexed WebSocket + frames. + """ + + def __init__(self, request, send_quota, receive_quota): + """Constructs an instance. + + Args: + request: _LogicalRequest instance. + send_quota: Initial send quota. + receive_quota: Initial receive quota. + """ + + # TODO(bashi): Support frame filters. + stream_options = StreamOptions() + # Physical stream is responsible for masking. 
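For context (an inference from the surrounding code, not stated in the patch): the physical stream has already removed the client's frame mask, so the logical stream must leave incoming payloads untouched, which is why unmask_receive is disabled below.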
+ stream_options.unmask_receive = False + # Control frames can be fragmented on logical channel. + stream_options.allow_fragmented_control_frame = True + Stream.__init__(self, request, stream_options) + self._send_quota = send_quota + self._send_quota_condition = threading.Condition() + self._receive_quota = receive_quota + self._write_inner_frame_semaphore = threading.Semaphore() + + def _create_inner_frame(self, opcode, payload, end=True): + # TODO(bashi): Support extensions that use reserved bits. + first_byte = (end << 7) | opcode + return (_encode_channel_id(self._request.channel_id) + + chr(first_byte) + payload) + + def _write_inner_frame(self, opcode, payload, end=True): + payload_length = len(payload) + write_position = 0 + + try: + # An inner frame will be fragmented if there is no enough send + # quota. This semaphore ensures that fragmented inner frames are + # sent in order on the logical channel. + # Note that frames that come from other logical channels or + # multiplexing control blocks can be inserted between fragmented + # inner frames on the physical channel. + self._write_inner_frame_semaphore.acquire() + while write_position < payload_length: + try: + self._send_quota_condition.acquire() + while self._send_quota == 0: + self._logger.debug( + 'No quota. Waiting FlowControl message for %d.' % + self._request.channel_id) + self._send_quota_condition.wait() + + remaining = payload_length - write_position + write_length = min(self._send_quota, remaining) + inner_frame_end = ( + end and + (write_position + write_length == payload_length)) + + inner_frame = self._create_inner_frame( + opcode, + payload[write_position:write_position+write_length], + inner_frame_end) + frame_data = self._writer.build( + inner_frame, end=True, binary=True) + self._send_quota -= write_length + self._logger.debug('Consumed quota=%d, remaining=%d' % + (write_length, self._send_quota)) + finally: + self._send_quota_condition.release() + + # Writing data will block the worker so we need to release + # _send_quota_condition before writing. + self._logger.debug('Sending inner frame: %r' % frame_data) + self._request.connection.write(frame_data) + write_position += write_length + + opcode = common.OPCODE_CONTINUATION + + except ValueError, e: + raise BadOperationException(e) + finally: + self._write_inner_frame_semaphore.release() + + def replenish_send_quota(self, send_quota): + """Replenish send quota.""" + + self._send_quota_condition.acquire() + self._send_quota += send_quota + self._logger.debug('Replenished send quota for channel id %d: %d' % + (self._request.channel_id, self._send_quota)) + self._send_quota_condition.notify() + self._send_quota_condition.release() + + def consume_receive_quota(self, amount): + """Consumes receive quota. 
Returns False on failure.""" + + if self._receive_quota < amount: + self._logger.debug('Violate quota on channel id %d: %d < %d' % + (self._request.channel_id, + self._receive_quota, amount)) + return False + self._receive_quota -= amount + return True + + def send_message(self, message, end=True, binary=False): + """Override Stream.send_message.""" + + if self._request.server_terminated: + raise BadOperationException( + 'Requested send_message after sending out a closing handshake') + + if binary and isinstance(message, unicode): + raise BadOperationException( + 'Message for binary frame must be instance of str') + + if binary: + opcode = common.OPCODE_BINARY + else: + opcode = common.OPCODE_TEXT + message = message.encode('utf-8') + + self._write_inner_frame(opcode, message, end) + + def _receive_frame(self): + """Overrides Stream._receive_frame. + + In addition to call Stream._receive_frame, this method adds the amount + of payload to receiving quota and sends FlowControl to the client. + We need to do it here because Stream.receive_message() handles + control frames internally. + """ + + opcode, payload, fin, rsv1, rsv2, rsv3 = Stream._receive_frame(self) + amount = len(payload) + self._receive_quota += amount + frame_data = _create_flow_control(self._request.channel_id, + amount) + self._logger.debug('Sending flow control for %d, replenished=%d' % + (self._request.channel_id, amount)) + self._request.connection.write_control_data(frame_data) + return opcode, payload, fin, rsv1, rsv2, rsv3 + + def receive_message(self): + """Overrides Stream.receive_message.""" + + # Just call Stream.receive_message(), but catch + # LogicalConnectionClosedException, which is raised when the logical + # connection has closed gracefully. + try: + return Stream.receive_message(self) + except LogicalConnectionClosedException, e: + self._logger.debug('%s', e) + return None + + def _send_closing_handshake(self, code, reason): + """Overrides Stream._send_closing_handshake.""" + + body = create_closing_handshake_body(code, reason) + self._logger.debug('Sending closing handshake for %d: (%r, %r)' % + (self._request.channel_id, code, reason)) + self._write_inner_frame(common.OPCODE_CLOSE, body, end=True) + + self._request.server_terminated = True + + def send_ping(self, body=''): + """Overrides Stream.send_ping""" + + self._logger.debug('Sending ping on logical channel %d: %r' % + (self._request.channel_id, body)) + self._write_inner_frame(common.OPCODE_PING, body, end=True) + + self._ping_queue.append(body) + + def _send_pong(self, body): + """Overrides Stream._send_pong""" + + self._logger.debug('Sending pong on logical channel %d: %r' % + (self._request.channel_id, body)) + self._write_inner_frame(common.OPCODE_PONG, body, end=True) + + def close_connection(self, code=common.STATUS_NORMAL_CLOSURE, reason=''): + """Overrides Stream.close_connection.""" + + # TODO(bashi): Implement + self._logger.debug('Closing logical connection %d' % + self._request.channel_id) + self._request.server_terminated = True + + def _drain_received_data(self): + """Overrides Stream._drain_received_data. Nothing need to be done for + logical channel. + """ + + pass + + +class _OutgoingData(object): + """A structure that holds data to be sent via physical connection and + origin of the data. + """ + + def __init__(self, channel_id, data): + self.channel_id = channel_id + self.data = data + + +class _PhysicalConnectionWriter(threading.Thread): + """A thread that is responsible for writing data to physical connection. 
+ + TODO(bashi): Make sure there is no thread-safety problem when the reader + thread reads data from the same socket at a time. + """ + + def __init__(self, mux_handler): + """Constructs an instance. + + Args: + mux_handler: _MuxHandler instance. + """ + + threading.Thread.__init__(self) + self._logger = util.get_class_logger(self) + self._mux_handler = mux_handler + self.setDaemon(True) + self._stop_requested = False + self._deque = collections.deque() + self._deque_condition = threading.Condition() + + def put_outgoing_data(self, data): + """Puts outgoing data. + + Args: + data: _OutgoingData instance. + + Raises: + BadOperationException: when the thread has been requested to + terminate. + """ + + try: + self._deque_condition.acquire() + if self._stop_requested: + raise BadOperationException('Cannot write data anymore') + + self._deque.append(data) + self._deque_condition.notify() + finally: + self._deque_condition.release() + + def _write_data(self, outgoing_data): + try: + self._mux_handler.physical_connection.write(outgoing_data.data) + except Exception, e: + util.prepend_message_to_exception( + 'Failed to send message to %r: ' % + (self._mux_handler.physical_connection.remote_addr,), e) + raise + + # TODO(bashi): It would be better to block the thread that sends + # control data as well. + if outgoing_data.channel_id != _CONTROL_CHANNEL_ID: + self._mux_handler.notify_write_done(outgoing_data.channel_id) + + def run(self): + self._deque_condition.acquire() + while not self._stop_requested: + if len(self._deque) == 0: + self._deque_condition.wait() + continue + + outgoing_data = self._deque.popleft() + self._deque_condition.release() + self._write_data(outgoing_data) + self._deque_condition.acquire() + + # Flush deque + try: + while len(self._deque) > 0: + outgoing_data = self._deque.popleft() + self._write_data(outgoing_data) + finally: + self._deque_condition.release() + + def stop(self): + """Stops the writer thread.""" + + self._deque_condition.acquire() + self._stop_requested = True + self._deque_condition.notify() + self._deque_condition.release() + + +class _PhysicalConnectionReader(threading.Thread): + """A thread that is responsible for reading data from physical connection. + """ + + def __init__(self, mux_handler): + """Constructs an instance. + + Args: + mux_handler: _MuxHandler instance. + """ + + threading.Thread.__init__(self) + self._logger = util.get_class_logger(self) + self._mux_handler = mux_handler + self.setDaemon(True) + + def run(self): + while True: + try: + physical_stream = self._mux_handler.physical_stream + message = physical_stream.receive_message() + if message is None: + break + # Below happens only when a data message is received. 
+ opcode = physical_stream.get_last_received_opcode() + if opcode != common.OPCODE_BINARY: + self._mux_handler.fail_physical_connection( + _DROP_CODE_INVALID_ENCAPSULATING_MESSAGE, + 'Received a text message on physical connection') + break + + except ConnectionTerminatedException, e: + self._logger.debug('%s', e) + break + + try: + self._mux_handler.dispatch_message(message) + except PhysicalConnectionError, e: + self._mux_handler.fail_physical_connection( + e.drop_code, e.message) + break + except LogicalChannelError, e: + self._mux_handler.fail_logical_channel( + e.channel_id, e.drop_code, e.message) + except Exception, e: + self._logger.debug(traceback.format_exc()) + break + + self._mux_handler.notify_reader_done() + + +class _Worker(threading.Thread): + """A thread that is responsible for running the corresponding application + handler. + """ + + def __init__(self, mux_handler, request): + """Constructs an instance. + + Args: + mux_handler: _MuxHandler instance. + request: _LogicalRequest instance. + """ + + threading.Thread.__init__(self) + self._logger = util.get_class_logger(self) + self._mux_handler = mux_handler + self._request = request + self.setDaemon(True) + + def run(self): + self._logger.debug('Logical channel worker started. (id=%d)' % + self._request.channel_id) + try: + # Non-critical exceptions will be handled by dispatcher. + self._mux_handler.dispatcher.transfer_data(self._request) + finally: + self._mux_handler.notify_worker_done(self._request.channel_id) + + +class _MuxHandshaker(hybi.Handshaker): + """Opening handshake processor for multiplexing.""" + + _DUMMY_WEBSOCKET_KEY = 'dGhlIHNhbXBsZSBub25jZQ==' + + def __init__(self, request, dispatcher, send_quota, receive_quota): + """Constructs an instance. + Args: + request: _LogicalRequest instance. + dispatcher: Dispatcher instance (dispatch.Dispatcher). + send_quota: Initial send quota. + receive_quota: Initial receive quota. + """ + + hybi.Handshaker.__init__(self, request, dispatcher) + self._send_quota = send_quota + self._receive_quota = receive_quota + + # Append headers which should not be included in handshake field of + # AddChannelRequest. + # TODO(bashi): Make sure whether we should raise exception when + # these headers are included already. + request.headers_in[common.UPGRADE_HEADER] = ( + common.WEBSOCKET_UPGRADE_TYPE) + request.headers_in[common.CONNECTION_HEADER] = ( + common.UPGRADE_CONNECTION_TYPE) + request.headers_in[common.SEC_WEBSOCKET_VERSION_HEADER] = ( + str(common.VERSION_HYBI_LATEST)) + request.headers_in[common.SEC_WEBSOCKET_KEY_HEADER] = ( + self._DUMMY_WEBSOCKET_KEY) + + def _create_stream(self, stream_options): + """Override hybi.Handshaker._create_stream.""" + + self._logger.debug('Creating logical stream for %d' % + self._request.channel_id) + return _LogicalStream(self._request, self._send_quota, + self._receive_quota) + + def _create_handshake_response(self, accept): + """Override hybi._create_handshake_response.""" + + response = [] + + response.append('HTTP/1.1 101 Switching Protocols\r\n') + + # Upgrade, Connection and Sec-WebSocket-Accept should be excluded. 
+ if self._request.ws_protocol is not None: + response.append('%s: %s\r\n' % ( + common.SEC_WEBSOCKET_PROTOCOL_HEADER, + self._request.ws_protocol)) + if (self._request.ws_extensions is not None and + len(self._request.ws_extensions) != 0): + response.append('%s: %s\r\n' % ( + common.SEC_WEBSOCKET_EXTENSIONS_HEADER, + common.format_extensions(self._request.ws_extensions))) + response.append('\r\n') + + return ''.join(response) + + def _send_handshake(self, accept): + """Override hybi.Handshaker._send_handshake.""" + + # Don't send handshake response for the default channel + if self._request.channel_id == _DEFAULT_CHANNEL_ID: + return + + handshake_response = self._create_handshake_response(accept) + frame_data = _create_add_channel_response( + self._request.channel_id, + handshake_response) + self._logger.debug('Sending handshake response for %d: %r' % + (self._request.channel_id, frame_data)) + self._request.connection.write_control_data(frame_data) + + +class _LogicalChannelData(object): + """A structure that holds information about logical channel. + """ + + def __init__(self, request, worker): + self.request = request + self.worker = worker + self.drop_code = _DROP_CODE_NORMAL_CLOSURE + self.drop_message = '' + + +class _HandshakeDeltaBase(object): + """A class that holds information for delta-encoded handshake.""" + + def __init__(self, headers): + self._headers = headers + + def create_headers(self, delta=None): + """Creates request headers for an AddChannelRequest that has + delta-encoded handshake. + + Args: + delta: headers should be overridden. + """ + + headers = copy.copy(self._headers) + if delta: + for key, value in delta.items(): + # The spec requires that a header with an empty value is + # removed from the delta base. + if len(value) == 0 and headers.has_key(key): + del headers[key] + else: + headers[key] = value + # TODO(bashi): Support extensions + headers['Sec-WebSocket-Extensions'] = '' + return headers + + +class _MuxHandler(object): + """Multiplexing handler. When a handler starts, it launches three + threads; the reader thread, the writer thread, and a worker thread. + + The reader thread reads data from the physical stream, i.e., the + ws_stream object of the underlying websocket connection. The reader + thread interprets multiplexed frames and dispatches them to logical + channels. Methods of this class are mostly called by the reader thread. + + The writer thread sends multiplexed frames which are created by + logical channels via the physical connection. + + The worker thread launched at the starting point handles the + "Implicitly Opened Connection". If multiplexing handler receives + an AddChannelRequest and accepts it, the handler will launch a new worker + thread and dispatch the request to it. + """ + + def __init__(self, request, dispatcher): + """Constructs an instance. + + Args: + request: mod_python request of the physical connection. + dispatcher: Dispatcher instance (dispatch.Dispatcher). + """ + + self.original_request = request + self.dispatcher = dispatcher + self.physical_connection = request.connection + self.physical_stream = request.ws_stream + self._logger = util.get_class_logger(self) + self._logical_channels = {} + self._logical_channels_condition = threading.Condition() + # Holds client's initial quota + self._channel_slots = collections.deque() + self._handshake_base = None + self._worker_done_notify_received = False + self._reader = None + self._writer = None + + def start(self): + """Starts the handler. 
+ + Raises: + MuxUnexpectedException: when the handler already started, or when + opening handshake of the default channel fails. + """ + + if self._reader or self._writer: + raise MuxUnexpectedException('MuxHandler already started') + + self._reader = _PhysicalConnectionReader(self) + self._writer = _PhysicalConnectionWriter(self) + self._reader.start() + self._writer.start() + + # Create "Implicitly Opened Connection". + logical_connection = _LogicalConnection(self, _DEFAULT_CHANNEL_ID) + self._handshake_base = _HandshakeDeltaBase( + self.original_request.headers_in) + logical_request = _LogicalRequest( + _DEFAULT_CHANNEL_ID, + self.original_request.method, + self.original_request.uri, + self.original_request.protocol, + self._handshake_base.create_headers(), + logical_connection) + # Client's send quota for the implicitly opened connection is zero, + # but we will send FlowControl later so set the initial quota to + # _INITIAL_QUOTA_FOR_CLIENT. + self._channel_slots.append(_INITIAL_QUOTA_FOR_CLIENT) + if not self._do_handshake_for_logical_request( + logical_request, send_quota=self.original_request.mux_quota): + raise MuxUnexpectedException( + 'Failed handshake on the default channel id') + self._add_logical_channel(logical_request) + + # Send FlowControl for the implicitly opened connection. + frame_data = _create_flow_control(_DEFAULT_CHANNEL_ID, + _INITIAL_QUOTA_FOR_CLIENT) + logical_request.connection.write_control_data(frame_data) + + def add_channel_slots(self, slots, send_quota): + """Adds channel slots. + + Args: + slots: number of slots to be added. + send_quota: initial send quota for slots. + """ + + self._channel_slots.extend([send_quota] * slots) + # Send NewChannelSlot to client. + frame_data = _create_new_channel_slot(slots, send_quota) + self.send_control_data(frame_data) + + def wait_until_done(self, timeout=None): + """Waits until all workers are done. Returns False when timeout has + occurred. Returns True on success. + + Args: + timeout: timeout in sec. + """ + + self._logical_channels_condition.acquire() + try: + while len(self._logical_channels) > 0: + self._logger.debug('Waiting workers(%d)...' % + len(self._logical_channels)) + self._worker_done_notify_received = False + self._logical_channels_condition.wait(timeout) + if not self._worker_done_notify_received: + self._logger.debug('Waiting worker(s) timed out') + return False + + finally: + self._logical_channels_condition.release() + + # Flush pending outgoing data + self._writer.stop() + self._writer.join() + + return True + + def notify_write_done(self, channel_id): + """Called by the writer thread when a write operation has done. + + Args: + channel_id: objective channel id. + """ + + try: + self._logical_channels_condition.acquire() + if channel_id in self._logical_channels: + channel_data = self._logical_channels[channel_id] + channel_data.request.connection.notify_write_done() + else: + self._logger.debug('Seems that logical channel for %d has gone' + % channel_id) + finally: + self._logical_channels_condition.release() + + def send_control_data(self, data): + """Sends data via the control channel. + + Args: + data: data to be sent. + """ + + self._writer.put_outgoing_data(_OutgoingData( + channel_id=_CONTROL_CHANNEL_ID, data=data)) + + def send_data(self, channel_id, data): + """Sends data via given logical channel. This method is called by + worker threads. + + Args: + channel_id: objective channel id. + data: data to be sent. 
+ """ + + self._writer.put_outgoing_data(_OutgoingData( + channel_id=channel_id, data=data)) + + def _send_drop_channel(self, channel_id, code=None, message=''): + frame_data = _create_drop_channel(channel_id, code, message) + self._logger.debug( + 'Sending drop channel for channel id %d' % channel_id) + self.send_control_data(frame_data) + + def _send_error_add_channel_response(self, channel_id, status=None): + if status is None: + status = common.HTTP_STATUS_BAD_REQUEST + + if status in _HTTP_BAD_RESPONSE_MESSAGES: + message = _HTTP_BAD_RESPONSE_MESSAGES[status] + else: + self._logger.debug('Response message for %d is not found' % status) + message = '???' + + response = 'HTTP/1.1 %d %s\r\n\r\n' % (status, message) + frame_data = _create_add_channel_response(channel_id, + encoded_handshake=response, + encoding=0, rejected=True) + self.send_control_data(frame_data) + + def _create_logical_request(self, block): + if block.channel_id == _CONTROL_CHANNEL_ID: + # TODO(bashi): Raise PhysicalConnectionError with code 2006 + # instead of MuxUnexpectedException. + raise MuxUnexpectedException( + 'Received the control channel id (0) as objective channel ' + 'id for AddChannel') + + if block.encoding > _HANDSHAKE_ENCODING_DELTA: + raise PhysicalConnectionError( + _DROP_CODE_UNKNOWN_REQUEST_ENCODING) + + method, path, version, headers = _parse_request_text( + block.encoded_handshake) + if block.encoding == _HANDSHAKE_ENCODING_DELTA: + headers = self._handshake_base.create_headers(headers) + + connection = _LogicalConnection(self, block.channel_id) + request = _LogicalRequest(block.channel_id, method, path, version, + headers, connection) + return request + + def _do_handshake_for_logical_request(self, request, send_quota=0): + try: + receive_quota = self._channel_slots.popleft() + except IndexError: + raise LogicalChannelError( + request.channel_id, _DROP_CODE_NEW_CHANNEL_SLOT_VIOLATION) + + handshaker = _MuxHandshaker(request, self.dispatcher, + send_quota, receive_quota) + try: + handshaker.do_handshake() + except handshake.VersionException, e: + self._logger.info('%s', e) + self._send_error_add_channel_response( + request.channel_id, status=common.HTTP_STATUS_BAD_REQUEST) + return False + except handshake.HandshakeException, e: + # TODO(bashi): Should we _Fail the Logical Channel_ with 3001 + # instead? 
+ self._logger.info('%s', e) + self._send_error_add_channel_response(request.channel_id, + status=e.status) + return False + except handshake.AbortedByUserException, e: + self._logger.info('%s', e) + self._send_error_add_channel_response(request.channel_id) + return False + + return True + + def _add_logical_channel(self, logical_request): + try: + self._logical_channels_condition.acquire() + if logical_request.channel_id in self._logical_channels: + self._logger.debug('Channel id %d already exists' % + logical_request.channel_id) + raise PhysicalConnectionError( + _DROP_CODE_CHANNEL_ALREADY_EXISTS, + 'Channel id %d already exists' % + logical_request.channel_id) + worker = _Worker(self, logical_request) + channel_data = _LogicalChannelData(logical_request, worker) + self._logical_channels[logical_request.channel_id] = channel_data + worker.start() + finally: + self._logical_channels_condition.release() + + def _process_add_channel_request(self, block): + try: + logical_request = self._create_logical_request(block) + except ValueError, e: + self._logger.debug('Failed to create logical request: %r' % e) + self._send_error_add_channel_response( + block.channel_id, status=common.HTTP_STATUS_BAD_REQUEST) + return + if self._do_handshake_for_logical_request(logical_request): + if block.encoding == _HANDSHAKE_ENCODING_IDENTITY: + # Update handshake base. + # TODO(bashi): Make sure this is the right place to update + # handshake base. + self._handshake_base = _HandshakeDeltaBase( + logical_request.headers_in) + self._add_logical_channel(logical_request) + else: + self._send_error_add_channel_response( + block.channel_id, status=common.HTTP_STATUS_BAD_REQUEST) + + def _process_flow_control(self, block): + try: + self._logical_channels_condition.acquire() + if not block.channel_id in self._logical_channels: + return + channel_data = self._logical_channels[block.channel_id] + channel_data.request.ws_stream.replenish_send_quota( + block.send_quota) + finally: + self._logical_channels_condition.release() + + def _process_drop_channel(self, block): + self._logger.debug( + 'DropChannel received for %d: code=%r, reason=%r' % + (block.channel_id, block.drop_code, block.drop_message)) + try: + self._logical_channels_condition.acquire() + if not block.channel_id in self._logical_channels: + return + channel_data = self._logical_channels[block.channel_id] + channel_data.drop_code = _DROP_CODE_ACKNOWLEDGED + # Close the logical channel + channel_data.request.connection.set_read_state( + _LogicalConnection.STATE_TERMINATED) + finally: + self._logical_channels_condition.release() + + def _process_control_blocks(self, parser): + for control_block in parser.read_control_blocks(): + opcode = control_block.opcode + self._logger.debug('control block received, opcode: %d' % opcode) + if opcode == _MUX_OPCODE_ADD_CHANNEL_REQUEST: + self._process_add_channel_request(control_block) + elif opcode == _MUX_OPCODE_ADD_CHANNEL_RESPONSE: + raise PhysicalConnectionError( + _DROP_CODE_INVALID_MUX_CONTROL_BLOCK, + 'Received AddChannelResponse') + elif opcode == _MUX_OPCODE_FLOW_CONTROL: + self._process_flow_control(control_block) + elif opcode == _MUX_OPCODE_DROP_CHANNEL: + self._process_drop_channel(control_block) + elif opcode == _MUX_OPCODE_NEW_CHANNEL_SLOT: + raise PhysicalConnectionError( + _DROP_CODE_INVALID_MUX_CONTROL_BLOCK, + 'Received NewChannelSlot') + else: + raise MuxUnexpectedException( + 'Unexpected opcode %r' % opcode) + + def _process_logical_frame(self, channel_id, parser): + self._logger.debug('Received a 
frame. channel id=%d' % channel_id) + try: + self._logical_channels_condition.acquire() + if not channel_id in self._logical_channels: + # We must ignore the message for an inactive channel. + return + channel_data = self._logical_channels[channel_id] + fin, rsv1, rsv2, rsv3, opcode, payload = parser.read_inner_frame() + if not channel_data.request.ws_stream.consume_receive_quota( + len(payload)): + # The client violates quota. Close logical channel. + raise LogicalChannelError( + channel_id, _DROP_CODE_SEND_QUOTA_VIOLATION) + header = create_header(opcode, len(payload), fin, rsv1, rsv2, rsv3, + mask=False) + frame_data = header + payload + channel_data.request.connection.append_frame_data(frame_data) + finally: + self._logical_channels_condition.release() + + def dispatch_message(self, message): + """Dispatches message. The reader thread calls this method. + + Args: + message: a message that contains encapsulated frame. + Raises: + PhysicalConnectionError: if the message contains physical + connection level errors. + LogicalChannelError: if the message contains logical channel + level errors. + """ + + parser = _MuxFramePayloadParser(message) + try: + channel_id = parser.read_channel_id() + except ValueError, e: + raise PhysicalConnectionError(_DROP_CODE_CHANNEL_ID_TRUNCATED) + if channel_id == _CONTROL_CHANNEL_ID: + self._process_control_blocks(parser) + else: + self._process_logical_frame(channel_id, parser) + + def notify_worker_done(self, channel_id): + """Called when a worker has finished. + + Args: + channel_id: channel id corresponded with the worker. + """ + + self._logger.debug('Worker for channel id %d terminated' % channel_id) + try: + self._logical_channels_condition.acquire() + if not channel_id in self._logical_channels: + raise MuxUnexpectedException( + 'Channel id %d not found' % channel_id) + channel_data = self._logical_channels.pop(channel_id) + finally: + self._worker_done_notify_received = True + self._logical_channels_condition.notify() + self._logical_channels_condition.release() + + if not channel_data.request.server_terminated: + self._send_drop_channel( + channel_id, code=channel_data.drop_code, + message=channel_data.drop_message) + + def notify_reader_done(self): + """This method is called by the reader thread when the reader has + finished. + """ + + # Terminate all logical connections + self._logger.debug('termiating all logical connections...') + self._logical_channels_condition.acquire() + for channel_data in self._logical_channels.values(): + try: + channel_data.request.connection.set_read_state( + _LogicalConnection.STATE_TERMINATED) + except Exception: + pass + self._logical_channels_condition.release() + + def fail_physical_connection(self, code, message): + """Fail the physical connection. + + Args: + code: drop reason code. + message: drop message. + """ + + self._logger.debug('Failing the physical connection...') + self._send_drop_channel(_CONTROL_CHANNEL_ID, code, message) + self.physical_stream.close_connection( + common.STATUS_INTERNAL_ENDPOINT_ERROR) + + def fail_logical_channel(self, channel_id, code, message): + """Fail a logical channel. + + Args: + channel_id: channel id. + code: drop reason code. + message: drop message. + """ + + self._logger.debug('Failing logical channel %d...' % channel_id) + try: + self._logical_channels_condition.acquire() + if channel_id in self._logical_channels: + channel_data = self._logical_channels[channel_id] + # Close the logical channel. 
notify_worker_done() will be + # called later and it will send DropChannel. + channel_data.drop_code = code + channel_data.drop_message = message + channel_data.request.connection.set_read_state( + _LogicalConnection.STATE_TERMINATED) + else: + self._send_drop_channel(channel_id, code, message) + finally: + self._logical_channels_condition.release() + + +def use_mux(request): + return hasattr(request, 'mux') and request.mux + + +def start(request, dispatcher): + mux_handler = _MuxHandler(request, dispatcher) + mux_handler.start() + + mux_handler.add_channel_slots(_INITIAL_NUMBER_OF_CHANNEL_SLOTS, + _INITIAL_QUOTA_FOR_CLIENT) + + mux_handler.wait_until_done() + + +# vi:sts=4 sw=4 et diff --git a/module/lib/mod_pywebsocket/standalone.py b/module/lib/mod_pywebsocket/standalone.py new file mode 100755 index 000000000..07a33d9c9 --- /dev/null +++ b/module/lib/mod_pywebsocket/standalone.py @@ -0,0 +1,998 @@ +#!/usr/bin/env python +# +# Copyright 2012, Google Inc. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + +"""Standalone WebSocket server. + +Use this file to launch pywebsocket without Apache HTTP Server. + + +BASIC USAGE + +Go to the src directory and run + + $ python mod_pywebsocket/standalone.py [-p <ws_port>] + [-w <websock_handlers>] + [-d <document_root>] + +<ws_port> is the port number to use for ws:// connection. + +<document_root> is the path to the root directory of HTML files. + +<websock_handlers> is the path to the root directory of WebSocket handlers. +If not specified, <document_root> will be used. See __init__.py (or +run $ pydoc mod_pywebsocket) for how to write WebSocket handlers. + +For more detail and other options, run + + $ python mod_pywebsocket/standalone.py --help + +or see _build_option_parser method below. + +For trouble shooting, adding "--log_level debug" might help you. + + +TRY DEMO + +Go to the src directory and run + + $ python standalone.py -d example + +to launch pywebsocket with the sample handler and html on port 80. 
Open +http://localhost/console.html, click the connect button, type something into +the text box next to the send button and click the send button. If everything +is working, you'll see the message you typed echoed by the server. + + +SUPPORTING TLS + +To support TLS, run standalone.py with -t, -k, and -c options. + + +SUPPORTING CLIENT AUTHENTICATION + +To support client authentication with TLS, run standalone.py with -t, -k, -c, +and --tls-client-auth, and --tls-client-ca options. + +E.g., $./standalone.py -d ../example -p 10443 -t -c ../test/cert/cert.pem -k +../test/cert/key.pem --tls-client-auth --tls-client-ca=../test/cert/cacert.pem + + +CONFIGURATION FILE + +You can also write a configuration file and use it by specifying the path to +the configuration file by --config option. Please write a configuration file +following the documentation of the Python ConfigParser library. Name of each +entry must be the long version argument name. E.g. to set log level to debug, +add the following line: + +log_level=debug + +For options which doesn't take value, please add some fake value. E.g. for +--tls option, add the following line: + +tls=True + +Note that tls will be enabled even if you write tls=False as the value part is +fake. + +When both a command line argument and a configuration file entry are set for +the same configuration item, the command line value will override one in the +configuration file. + + +THREADING + +This server is derived from SocketServer.ThreadingMixIn. Hence a thread is +used for each request. + + +SECURITY WARNING + +This uses CGIHTTPServer and CGIHTTPServer is not secure. +It may execute arbitrary Python code or external programs. It should not be +used outside a firewall. +""" + +import BaseHTTPServer +import CGIHTTPServer +import SimpleHTTPServer +import SocketServer +import ConfigParser +import base64 +import httplib +import logging +import logging.handlers +import optparse +import os +import re +import select +import socket +import sys +import threading +import time + +_HAS_SSL = False +_HAS_OPEN_SSL = False +try: + import ssl + _HAS_SSL = True +except ImportError: + try: + import OpenSSL.SSL + _HAS_OPEN_SSL = True + except ImportError: + pass + +from mod_pywebsocket import common +from mod_pywebsocket import dispatch +from mod_pywebsocket import handshake +from mod_pywebsocket import http_header_util +from mod_pywebsocket import memorizingfile +from mod_pywebsocket import util + + +_DEFAULT_LOG_MAX_BYTES = 1024 * 256 +_DEFAULT_LOG_BACKUP_COUNT = 5 + +_DEFAULT_REQUEST_QUEUE_SIZE = 128 + +# 1024 is practically large enough to contain WebSocket handshake lines. +_MAX_MEMORIZED_LINES = 1024 + + +class _StandaloneConnection(object): + """Mimic mod_python mp_conn.""" + + def __init__(self, request_handler): + """Construct an instance. + + Args: + request_handler: A WebSocketRequestHandler instance. + """ + + self._request_handler = request_handler + + def get_local_addr(self): + """Getter to mimic mp_conn.local_addr.""" + + return (self._request_handler.server.server_name, + self._request_handler.server.server_port) + local_addr = property(get_local_addr) + + def get_remote_addr(self): + """Getter to mimic mp_conn.remote_addr. 
+ + Setting the property in __init__ won't work because the request + handler is not initialized yet there.""" + + return self._request_handler.client_address + remote_addr = property(get_remote_addr) + + def write(self, data): + """Mimic mp_conn.write().""" + + return self._request_handler.wfile.write(data) + + def read(self, length): + """Mimic mp_conn.read().""" + + return self._request_handler.rfile.read(length) + + def get_memorized_lines(self): + """Get memorized lines.""" + + return self._request_handler.rfile.get_memorized_lines() + + +class _StandaloneRequest(object): + """Mimic mod_python request.""" + + def __init__(self, request_handler, use_tls): + """Construct an instance. + + Args: + request_handler: A WebSocketRequestHandler instance. + """ + + self._logger = util.get_class_logger(self) + + self._request_handler = request_handler + self.connection = _StandaloneConnection(request_handler) + self._use_tls = use_tls + self.headers_in = request_handler.headers + + def get_uri(self): + """Getter to mimic request.uri.""" + + return self._request_handler.path + uri = property(get_uri) + + def get_method(self): + """Getter to mimic request.method.""" + + return self._request_handler.command + method = property(get_method) + + def get_protocol(self): + """Getter to mimic request.protocol.""" + + return self._request_handler.request_version + protocol = property(get_protocol) + + def is_https(self): + """Mimic request.is_https().""" + + return self._use_tls + + def _drain_received_data(self): + """Don't use this method from WebSocket handler. Drains unread data + in the receive buffer. + """ + + raw_socket = self._request_handler.connection + drained_data = util.drain_received_data(raw_socket) + + if drained_data: + self._logger.debug( + 'Drained data following close frame: %r', drained_data) + + +class _StandaloneSSLConnection(object): + """A wrapper class for OpenSSL.SSL.Connection to provide makefile method + which is not supported by the class. + """ + + def __init__(self, connection): + self._connection = connection + + def __getattribute__(self, name): + if name in ('_connection', 'makefile'): + return object.__getattribute__(self, name) + return self._connection.__getattribute__(name) + + def __setattr__(self, name, value): + if name in ('_connection', 'makefile'): + return object.__setattr__(self, name, value) + return self._connection.__setattr__(name, value) + + def makefile(self, mode='r', bufsize=-1): + return socket._fileobject(self._connection, mode, bufsize) + + +def _alias_handlers(dispatcher, websock_handlers_map_file): + """Set aliases specified in websock_handler_map_file in dispatcher. 
+ + Args: + dispatcher: dispatch.Dispatcher instance + websock_handler_map_file: alias map file + """ + + fp = open(websock_handlers_map_file) + try: + for line in fp: + if line[0] == '#' or line.isspace(): + continue + m = re.match('(\S+)\s+(\S+)', line) + if not m: + logging.warning('Wrong format in map file:' + line) + continue + try: + dispatcher.add_resource_path_alias( + m.group(1), m.group(2)) + except dispatch.DispatchException, e: + logging.error(str(e)) + finally: + fp.close() + + +class WebSocketServer(SocketServer.ThreadingMixIn, BaseHTTPServer.HTTPServer): + """HTTPServer specialized for WebSocket.""" + + # Overrides SocketServer.ThreadingMixIn.daemon_threads + daemon_threads = True + # Overrides BaseHTTPServer.HTTPServer.allow_reuse_address + allow_reuse_address = True + + def __init__(self, options): + """Override SocketServer.TCPServer.__init__ to set SSL enabled + socket object to self.socket before server_bind and server_activate, + if necessary. + """ + + # Share a Dispatcher among request handlers to save time for + # instantiation. Dispatcher can be shared because it is thread-safe. + options.dispatcher = dispatch.Dispatcher( + options.websock_handlers, + options.scan_dir, + options.allow_handlers_outside_root_dir) + if options.websock_handlers_map_file: + _alias_handlers(options.dispatcher, + options.websock_handlers_map_file) + warnings = options.dispatcher.source_warnings() + if warnings: + for warning in warnings: + logging.warning('mod_pywebsocket: %s' % warning) + + self._logger = util.get_class_logger(self) + + self.request_queue_size = options.request_queue_size + self.__ws_is_shut_down = threading.Event() + self.__ws_serving = False + + SocketServer.BaseServer.__init__( + self, (options.server_host, options.port), WebSocketRequestHandler) + + # Expose the options object to allow handler objects access it. We name + # it with websocket_ prefix to avoid conflict. + self.websocket_server_options = options + + self._create_sockets() + self.server_bind() + self.server_activate() + + def _create_sockets(self): + self.server_name, self.server_port = self.server_address + self._sockets = [] + if not self.server_name: + # On platforms that doesn't support IPv6, the first bind fails. + # On platforms that supports IPv6 + # - If it binds both IPv4 and IPv6 on call with AF_INET6, the + # first bind succeeds and the second fails (we'll see 'Address + # already in use' error). + # - If it binds only IPv6 on call with AF_INET6, both call are + # expected to succeed to listen both protocol. 
+ addrinfo_array = [ + (socket.AF_INET6, socket.SOCK_STREAM, '', '', ''), + (socket.AF_INET, socket.SOCK_STREAM, '', '', '')] + else: + addrinfo_array = socket.getaddrinfo(self.server_name, + self.server_port, + socket.AF_UNSPEC, + socket.SOCK_STREAM, + socket.IPPROTO_TCP) + for addrinfo in addrinfo_array: + self._logger.info('Create socket on: %r', addrinfo) + family, socktype, proto, canonname, sockaddr = addrinfo + try: + socket_ = socket.socket(family, socktype) + except Exception, e: + self._logger.info('Skip by failure: %r', e) + continue + if self.websocket_server_options.use_tls: + if _HAS_SSL: + if self.websocket_server_options.tls_client_auth: + client_cert_ = ssl.CERT_REQUIRED + else: + client_cert_ = ssl.CERT_NONE + socket_ = ssl.wrap_socket(socket_, + keyfile=self.websocket_server_options.private_key, + certfile=self.websocket_server_options.certificate, + ssl_version=ssl.PROTOCOL_SSLv23, + ca_certs=self.websocket_server_options.tls_client_ca, + cert_reqs=client_cert_) + if _HAS_OPEN_SSL: + ctx = OpenSSL.SSL.Context(OpenSSL.SSL.SSLv23_METHOD) + ctx.use_privatekey_file( + self.websocket_server_options.private_key) + ctx.use_certificate_file( + self.websocket_server_options.certificate) + socket_ = OpenSSL.SSL.Connection(ctx, socket_) + self._sockets.append((socket_, addrinfo)) + + def server_bind(self): + """Override SocketServer.TCPServer.server_bind to enable multiple + sockets bind. + """ + + failed_sockets = [] + + for socketinfo in self._sockets: + socket_, addrinfo = socketinfo + self._logger.info('Bind on: %r', addrinfo) + if self.allow_reuse_address: + socket_.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) + try: + socket_.bind(self.server_address) + except Exception, e: + self._logger.info('Skip by failure: %r', e) + socket_.close() + failed_sockets.append(socketinfo) + if self.server_address[1] == 0: + # The operating system assigns the actual port number for port + # number 0. This case, the second and later sockets should use + # the same port number. Also self.server_port is rewritten + # because it is exported, and will be used by external code. + self.server_address = ( + self.server_name, socket_.getsockname()[1]) + self.server_port = self.server_address[1] + self._logger.info('Port %r is assigned', self.server_port) + + for socketinfo in failed_sockets: + self._sockets.remove(socketinfo) + + def server_activate(self): + """Override SocketServer.TCPServer.server_activate to enable multiple + sockets listen. + """ + + failed_sockets = [] + + for socketinfo in self._sockets: + socket_, addrinfo = socketinfo + self._logger.info('Listen on: %r', addrinfo) + try: + socket_.listen(self.request_queue_size) + except Exception, e: + self._logger.info('Skip by failure: %r', e) + socket_.close() + failed_sockets.append(socketinfo) + + for socketinfo in failed_sockets: + self._sockets.remove(socketinfo) + + if len(self._sockets) == 0: + self._logger.critical( + 'No sockets activated. Use info log level to see the reason.') + + def server_close(self): + """Override SocketServer.TCPServer.server_close to enable multiple + sockets close. 
+ """ + + for socketinfo in self._sockets: + socket_, addrinfo = socketinfo + self._logger.info('Close on: %r', addrinfo) + socket_.close() + + def fileno(self): + """Override SocketServer.TCPServer.fileno.""" + + self._logger.critical('Not supported: fileno') + return self._sockets[0][0].fileno() + + def handle_error(self, rquest, client_address): + """Override SocketServer.handle_error.""" + + self._logger.error( + 'Exception in processing request from: %r\n%s', + client_address, + util.get_stack_trace()) + # Note: client_address is a tuple. + + def get_request(self): + """Override TCPServer.get_request to wrap OpenSSL.SSL.Connection + object with _StandaloneSSLConnection to provide makefile method. We + cannot substitute OpenSSL.SSL.Connection.makefile since it's readonly + attribute. + """ + + accepted_socket, client_address = self.socket.accept() + if self.websocket_server_options.use_tls and _HAS_OPEN_SSL: + accepted_socket = _StandaloneSSLConnection(accepted_socket) + return accepted_socket, client_address + + def serve_forever(self, poll_interval=0.5): + """Override SocketServer.BaseServer.serve_forever.""" + + self.__ws_serving = True + self.__ws_is_shut_down.clear() + handle_request = self.handle_request + if hasattr(self, '_handle_request_noblock'): + handle_request = self._handle_request_noblock + else: + self._logger.warning('Fallback to blocking request handler') + try: + while self.__ws_serving: + r, w, e = select.select( + [socket_[0] for socket_ in self._sockets], + [], [], poll_interval) + for socket_ in r: + self.socket = socket_ + handle_request() + self.socket = None + finally: + self.__ws_is_shut_down.set() + + def shutdown(self): + """Override SocketServer.BaseServer.shutdown.""" + + self.__ws_serving = False + self.__ws_is_shut_down.wait() + + +class WebSocketRequestHandler(CGIHTTPServer.CGIHTTPRequestHandler): + """CGIHTTPRequestHandler specialized for WebSocket.""" + + # Use httplib.HTTPMessage instead of mimetools.Message. + MessageClass = httplib.HTTPMessage + + def setup(self): + """Override SocketServer.StreamRequestHandler.setup to wrap rfile + with MemorizingFile. + + This method will be called by BaseRequestHandler's constructor + before calling BaseHTTPRequestHandler.handle. + BaseHTTPRequestHandler.handle will call + BaseHTTPRequestHandler.handle_one_request and it will call + WebSocketRequestHandler.parse_request. + """ + + # Call superclass's setup to prepare rfile, wfile, etc. See setup + # definition on the root class SocketServer.StreamRequestHandler to + # understand what this does. + CGIHTTPServer.CGIHTTPRequestHandler.setup(self) + + self.rfile = memorizingfile.MemorizingFile( + self.rfile, + max_memorized_lines=_MAX_MEMORIZED_LINES) + + def __init__(self, request, client_address, server): + self._logger = util.get_class_logger(self) + + self._options = server.websocket_server_options + + # Overrides CGIHTTPServerRequestHandler.cgi_directories. + self.cgi_directories = self._options.cgi_directories + # Replace CGIHTTPRequestHandler.is_executable method. + if self._options.is_executable_method is not None: + self.is_executable = self._options.is_executable_method + + # This actually calls BaseRequestHandler.__init__. + CGIHTTPServer.CGIHTTPRequestHandler.__init__( + self, request, client_address, server) + + def parse_request(self): + """Override BaseHTTPServer.BaseHTTPRequestHandler.parse_request. + + Return True to continue processing for HTTP(S), False otherwise. 
+ + See BaseHTTPRequestHandler.handle_one_request method which calls + this method to understand how the return value will be handled. + """ + + # We hook parse_request method, but also call the original + # CGIHTTPRequestHandler.parse_request since when we return False, + # CGIHTTPRequestHandler.handle_one_request continues processing and + # it needs variables set by CGIHTTPRequestHandler.parse_request. + # + # Variables set by this method will be also used by WebSocket request + # handling (self.path, self.command, self.requestline, etc. See also + # how _StandaloneRequest's members are implemented using these + # attributes). + if not CGIHTTPServer.CGIHTTPRequestHandler.parse_request(self): + return False + + if self._options.use_basic_auth: + auth = self.headers.getheader('Authorization') + if auth != self._options.basic_auth_credential: + self.send_response(401) + self.send_header('WWW-Authenticate', + 'Basic realm="Pywebsocket"') + self.end_headers() + self._logger.info('Request basic authentication') + return True + + host, port, resource = http_header_util.parse_uri(self.path) + if resource is None: + self._logger.info('Invalid URI: %r', self.path) + self._logger.info('Fallback to CGIHTTPRequestHandler') + return True + server_options = self.server.websocket_server_options + if host is not None: + validation_host = server_options.validation_host + if validation_host is not None and host != validation_host: + self._logger.info('Invalid host: %r (expected: %r)', + host, + validation_host) + self._logger.info('Fallback to CGIHTTPRequestHandler') + return True + if port is not None: + validation_port = server_options.validation_port + if validation_port is not None and port != validation_port: + self._logger.info('Invalid port: %r (expected: %r)', + port, + validation_port) + self._logger.info('Fallback to CGIHTTPRequestHandler') + return True + self.path = resource + + request = _StandaloneRequest(self, self._options.use_tls) + + try: + # Fallback to default http handler for request paths for which + # we don't have request handlers. + if not self._options.dispatcher.get_handler_suite(self.path): + self._logger.info('No handler for resource: %r', + self.path) + self._logger.info('Fallback to CGIHTTPRequestHandler') + return True + except dispatch.DispatchException, e: + self._logger.info('%s', e) + self.send_error(e.status) + return False + + # If any Exceptions without except clause setup (including + # DispatchException) is raised below this point, it will be caught + # and logged by WebSocketServer. + + try: + try: + handshake.do_handshake( + request, + self._options.dispatcher, + allowDraft75=self._options.allow_draft75, + strict=self._options.strict) + except handshake.VersionException, e: + self._logger.info('%s', e) + self.send_response(common.HTTP_STATUS_BAD_REQUEST) + self.send_header(common.SEC_WEBSOCKET_VERSION_HEADER, + e.supported_versions) + self.end_headers() + return False + except handshake.HandshakeException, e: + # Handshake for ws(s) failed. 
+ self._logger.info('%s', e) + self.send_error(e.status) + return False + + request._dispatcher = self._options.dispatcher + self._options.dispatcher.transfer_data(request) + except handshake.AbortedByUserException, e: + self._logger.info('%s', e) + return False + + def log_request(self, code='-', size='-'): + """Override BaseHTTPServer.log_request.""" + + self._logger.info('"%s" %s %s', + self.requestline, str(code), str(size)) + + def log_error(self, *args): + """Override BaseHTTPServer.log_error.""" + + # Despite the name, this method is for warnings than for errors. + # For example, HTTP status code is logged by this method. + self._logger.warning('%s - %s', + self.address_string(), + args[0] % args[1:]) + + def is_cgi(self): + """Test whether self.path corresponds to a CGI script. + + Add extra check that self.path doesn't contains .. + Also check if the file is a executable file or not. + If the file is not executable, it is handled as static file or dir + rather than a CGI script. + """ + + if CGIHTTPServer.CGIHTTPRequestHandler.is_cgi(self): + if '..' in self.path: + return False + # strip query parameter from request path + resource_name = self.path.split('?', 2)[0] + # convert resource_name into real path name in filesystem. + scriptfile = self.translate_path(resource_name) + if not os.path.isfile(scriptfile): + return False + if not self.is_executable(scriptfile): + return False + return True + return False + + +def _get_logger_from_class(c): + return logging.getLogger('%s.%s' % (c.__module__, c.__name__)) + + +def _configure_logging(options): + logging.addLevelName(common.LOGLEVEL_FINE, 'FINE') + + logger = logging.getLogger() + logger.setLevel(logging.getLevelName(options.log_level.upper())) + if options.log_file: + handler = logging.handlers.RotatingFileHandler( + options.log_file, 'a', options.log_max, options.log_count) + else: + handler = logging.StreamHandler() + formatter = logging.Formatter( + '[%(asctime)s] [%(levelname)s] %(name)s: %(message)s') + handler.setFormatter(formatter) + logger.addHandler(handler) + + deflate_log_level_name = logging.getLevelName( + options.deflate_log_level.upper()) + _get_logger_from_class(util._Deflater).setLevel( + deflate_log_level_name) + _get_logger_from_class(util._Inflater).setLevel( + deflate_log_level_name) + + +def _build_option_parser(): + parser = optparse.OptionParser() + + parser.add_option('--config', dest='config_file', type='string', + default=None, + help=('Path to configuration file. See the file comment ' + 'at the top of this file for the configuration ' + 'file format')) + parser.add_option('-H', '--server-host', '--server_host', + dest='server_host', + default='', + help='server hostname to listen to') + parser.add_option('-V', '--validation-host', '--validation_host', + dest='validation_host', + default=None, + help='server hostname to validate in absolute path.') + parser.add_option('-p', '--port', dest='port', type='int', + default=common.DEFAULT_WEB_SOCKET_PORT, + help='port to listen to') + parser.add_option('-P', '--validation-port', '--validation_port', + dest='validation_port', type='int', + default=None, + help='server port to validate in absolute path.') + parser.add_option('-w', '--websock-handlers', '--websock_handlers', + dest='websock_handlers', + default='.', + help=('The root directory of WebSocket handler files. 
' + 'If the path is relative, --document-root is used ' + 'as the base.')) + parser.add_option('-m', '--websock-handlers-map-file', + '--websock_handlers_map_file', + dest='websock_handlers_map_file', + default=None, + help=('WebSocket handlers map file. ' + 'Each line consists of alias_resource_path and ' + 'existing_resource_path, separated by spaces.')) + parser.add_option('-s', '--scan-dir', '--scan_dir', dest='scan_dir', + default=None, + help=('Must be a directory under --websock-handlers. ' + 'Only handlers under this directory are scanned ' + 'and registered to the server. ' + 'Useful for saving scan time when the handler ' + 'root directory contains lots of files that are ' + 'not handler file or are handler files but you ' + 'don\'t want them to be registered. ')) + parser.add_option('--allow-handlers-outside-root-dir', + '--allow_handlers_outside_root_dir', + dest='allow_handlers_outside_root_dir', + action='store_true', + default=False, + help=('Scans WebSocket handlers even if their canonical ' + 'path is not under --websock-handlers.')) + parser.add_option('-d', '--document-root', '--document_root', + dest='document_root', default='.', + help='Document root directory.') + parser.add_option('-x', '--cgi-paths', '--cgi_paths', dest='cgi_paths', + default=None, + help=('CGI paths relative to document_root.' + 'Comma-separated. (e.g -x /cgi,/htbin) ' + 'Files under document_root/cgi_path are handled ' + 'as CGI programs. Must be executable.')) + parser.add_option('-t', '--tls', dest='use_tls', action='store_true', + default=False, help='use TLS (wss://)') + parser.add_option('-k', '--private-key', '--private_key', + dest='private_key', + default='', help='TLS private key file.') + parser.add_option('-c', '--certificate', dest='certificate', + default='', help='TLS certificate file.') + parser.add_option('--tls-client-auth', dest='tls_client_auth', + action='store_true', default=False, + help='Requires TLS client auth on every connection.') + parser.add_option('--tls-client-ca', dest='tls_client_ca', default='', + help=('Specifies a pem file which contains a set of ' + 'concatenated CA certificates which are used to ' + 'validate certificates passed from clients')) + parser.add_option('--basic-auth', dest='use_basic_auth', + action='store_true', default=False, + help='Requires Basic authentication.') + parser.add_option('--basic-auth-credential', + dest='basic_auth_credential', default='test:test', + help='Specifies the credential of basic authentication ' + 'by username:password pair (e.g. test:test).') + parser.add_option('-l', '--log-file', '--log_file', dest='log_file', + default='', help='Log file.') + # Custom log level: + # - FINE: Prints status of each frame processing step + parser.add_option('--log-level', '--log_level', type='choice', + dest='log_level', default='warn', + choices=['fine', + 'debug', 'info', 'warning', 'warn', 'error', + 'critical'], + help='Log level.') + parser.add_option('--deflate-log-level', '--deflate_log_level', + type='choice', + dest='deflate_log_level', default='warn', + choices=['debug', 'info', 'warning', 'warn', 'error', + 'critical'], + help='Log level for _Deflater and _Inflater.') + parser.add_option('--thread-monitor-interval-in-sec', + '--thread_monitor_interval_in_sec', + dest='thread_monitor_interval_in_sec', + type='int', default=-1, + help=('If positive integer is specified, run a thread ' + 'monitor to show the status of server threads ' + 'periodically in the specified inteval in ' + 'second. 
If non-positive integer is specified, ' + 'disable the thread monitor.')) + parser.add_option('--log-max', '--log_max', dest='log_max', type='int', + default=_DEFAULT_LOG_MAX_BYTES, + help='Log maximum bytes') + parser.add_option('--log-count', '--log_count', dest='log_count', + type='int', default=_DEFAULT_LOG_BACKUP_COUNT, + help='Log backup count') + parser.add_option('--allow-draft75', dest='allow_draft75', + action='store_true', default=False, + help='Obsolete option. Ignored.') + parser.add_option('--strict', dest='strict', action='store_true', + default=False, help='Obsolete option. Ignored.') + parser.add_option('-q', '--queue', dest='request_queue_size', type='int', + default=_DEFAULT_REQUEST_QUEUE_SIZE, + help='request queue size') + + return parser + + +class ThreadMonitor(threading.Thread): + daemon = True + + def __init__(self, interval_in_sec): + threading.Thread.__init__(self, name='ThreadMonitor') + + self._logger = util.get_class_logger(self) + + self._interval_in_sec = interval_in_sec + + def run(self): + while True: + thread_name_list = [] + for thread in threading.enumerate(): + thread_name_list.append(thread.name) + self._logger.info( + "%d active threads: %s", + threading.active_count(), + ', '.join(thread_name_list)) + time.sleep(self._interval_in_sec) + + +def _parse_args_and_config(args): + parser = _build_option_parser() + + # First, parse options without configuration file. + temporary_options, temporary_args = parser.parse_args(args=args) + if temporary_args: + logging.critical( + 'Unrecognized positional arguments: %r', temporary_args) + sys.exit(1) + + if temporary_options.config_file: + try: + config_fp = open(temporary_options.config_file, 'r') + except IOError, e: + logging.critical( + 'Failed to open configuration file %r: %r', + temporary_options.config_file, + e) + sys.exit(1) + + config_parser = ConfigParser.SafeConfigParser() + config_parser.readfp(config_fp) + config_fp.close() + + args_from_config = [] + for name, value in config_parser.items('pywebsocket'): + args_from_config.append('--' + name) + args_from_config.append(value) + if args is None: + args = args_from_config + else: + args = args_from_config + args + return parser.parse_args(args=args) + else: + return temporary_options, temporary_args + + +def _main(args=None): + """You can call this function from your own program, but please note that + this function has some side-effects that might affect your program. For + example, util.wrap_popen3_for_win use in this method replaces implementation + of os.popen3. + """ + + options, args = _parse_args_and_config(args=args) + + os.chdir(options.document_root) + + _configure_logging(options) + + # TODO(tyoshino): Clean up initialization of CGI related values. Move some + # of code here to WebSocketRequestHandler class if it's better. + options.cgi_directories = [] + options.is_executable_method = None + if options.cgi_paths: + options.cgi_directories = options.cgi_paths.split(',') + if sys.platform in ('cygwin', 'win32'): + cygwin_path = None + # For Win32 Python, it is expected that CYGWIN_PATH + # is set to a directory of cygwin binaries. + # For example, websocket_server.py in Chromium sets CYGWIN_PATH to + # full path of third_party/cygwin/bin. 
+ if 'CYGWIN_PATH' in os.environ: + cygwin_path = os.environ['CYGWIN_PATH'] + util.wrap_popen3_for_win(cygwin_path) + + def __check_script(scriptpath): + return util.get_script_interp(scriptpath, cygwin_path) + + options.is_executable_method = __check_script + + if options.use_tls: + if not (_HAS_SSL or _HAS_OPEN_SSL): + logging.critical('TLS support requires ssl or pyOpenSSL module.') + sys.exit(1) + if not options.private_key or not options.certificate: + logging.critical( + 'To use TLS, specify private_key and certificate.') + sys.exit(1) + + if options.tls_client_auth: + if not options.use_tls: + logging.critical('TLS must be enabled for client authentication.') + sys.exit(1) + if not _HAS_SSL: + logging.critical('Client authentication requires ssl module.') + sys.exit(1) + + if not options.scan_dir: + options.scan_dir = options.websock_handlers + + if options.use_basic_auth: + options.basic_auth_credential = 'Basic ' + base64.b64encode( + options.basic_auth_credential) + + try: + if options.thread_monitor_interval_in_sec > 0: + # Run a thread monitor to show the status of server threads for + # debugging. + ThreadMonitor(options.thread_monitor_interval_in_sec).start() + + server = WebSocketServer(options) + server.serve_forever() + except Exception, e: + logging.critical('mod_pywebsocket: %s' % e) + logging.critical('mod_pywebsocket: %s' % util.get_stack_trace()) + sys.exit(1) + + +if __name__ == '__main__': + _main(sys.argv[1:]) + + +# vi:sts=4 sw=4 et diff --git a/module/lib/mod_pywebsocket/stream.py b/module/lib/mod_pywebsocket/stream.py new file mode 100644 index 000000000..edc533279 --- /dev/null +++ b/module/lib/mod_pywebsocket/stream.py @@ -0,0 +1,57 @@ +# Copyright 2011, Google Inc. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + +"""This file exports public symbols.
+""" + + +from mod_pywebsocket._stream_base import BadOperationException +from mod_pywebsocket._stream_base import ConnectionTerminatedException +from mod_pywebsocket._stream_base import InvalidFrameException +from mod_pywebsocket._stream_base import InvalidUTF8Exception +from mod_pywebsocket._stream_base import UnsupportedFrameException +from mod_pywebsocket._stream_hixie75 import StreamHixie75 +from mod_pywebsocket._stream_hybi import Frame +from mod_pywebsocket._stream_hybi import Stream +from mod_pywebsocket._stream_hybi import StreamOptions + +# These methods are intended to be used by WebSocket client developers to have +# their implementations receive broken data in tests. +from mod_pywebsocket._stream_hybi import create_close_frame +from mod_pywebsocket._stream_hybi import create_header +from mod_pywebsocket._stream_hybi import create_length_header +from mod_pywebsocket._stream_hybi import create_ping_frame +from mod_pywebsocket._stream_hybi import create_pong_frame +from mod_pywebsocket._stream_hybi import create_binary_frame +from mod_pywebsocket._stream_hybi import create_text_frame +from mod_pywebsocket._stream_hybi import create_closing_handshake_body + + +# vi:sts=4 sw=4 et diff --git a/module/lib/mod_pywebsocket/util.py b/module/lib/mod_pywebsocket/util.py new file mode 100644 index 000000000..7bb0b5d9e --- /dev/null +++ b/module/lib/mod_pywebsocket/util.py @@ -0,0 +1,515 @@ +# Copyright 2011, Google Inc. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + +"""WebSocket utilities. +""" + + +import array +import errno + +# Import hash classes from a module available and recommended for each Python +# version and re-export those symbol. Use sha and md5 module in Python 2.4, and +# hashlib module in Python 2.6. 
+# Import hash classes from a module available and recommended for each Python +# version and re-export those symbols. Use the sha and md5 modules in Python +# 2.4, and the hashlib module in Python 2.6. +try: + import hashlib + md5_hash = hashlib.md5 + sha1_hash = hashlib.sha1 +except ImportError: + import md5 + import sha + md5_hash = md5.md5 + sha1_hash = sha.sha + +import StringIO +import logging +import os +import re +import socket +import traceback +import zlib + + +def get_stack_trace(): + """Get the current stack trace as a string. + + This is needed to support Python 2.3. + TODO: Remove this when we only support Python 2.4 and above. + Use traceback.format_exc instead. + """ + + out = StringIO.StringIO() + traceback.print_exc(file=out) + return out.getvalue() + + +def prepend_message_to_exception(message, exc): + """Prepend the message to the exception.""" + + exc.args = (message + str(exc),) + return + + +def __translate_interp(interp, cygwin_path): + """Translate the interp program path so that Win32 python can run a cygwin + program (e.g. perl). Note that it doesn't support a path that contains + spaces, which is typically not a problem on Unix, where #!-scripts are + written. For Win32 python, cygwin_path is a directory of cygwin binaries. + + Args: + interp: interp command line + cygwin_path: directory name of cygwin binary, or None + Returns: + translated interp command line. + """ + if not cygwin_path: + return interp + m = re.match('^[^ ]*/([^ ]+)( .*)?', interp) + if m: + cmd = os.path.join(cygwin_path, m.group(1)) + return cmd + m.group(2) + return interp + + +def get_script_interp(script_path, cygwin_path=None): + """Gets the #!-interpreter command line from the script. + + It also fixes the command path. When Cygwin Python is used, e.g. in + WebKit, it can run "/usr/bin/perl -wT hello.pl" directly. When Win32 + Python is used, e.g. in Chromium, it cannot, so "/usr/bin/perl" is fixed + to "<cygwin_path>\perl.exe". + + Args: + script_path: pathname of the script + cygwin_path: directory name of cygwin binary, or None + Returns: + #!-interpreter command line, or None if it is not a #!-script. + """ + fp = open(script_path) + line = fp.readline() + fp.close() + m = re.match('^#!(.*)', line) + if m: + return __translate_interp(m.group(1), cygwin_path) + return None + + +def wrap_popen3_for_win(cygwin_path): + """Wrap popen3 to support #!-scripts on Windows. + + Args: + cygwin_path: path of the cygwin binary directory if the command path + needs to be translated. None if no translation is required. + """ + + __orig_popen3 = os.popen3 + + def __wrap_popen3(cmd, mode='t', bufsize=-1): + cmdline = cmd.split(' ') + interp = get_script_interp(cmdline[0], cygwin_path) + if interp: + cmd = interp + ' ' + cmd + return __orig_popen3(cmd, mode, bufsize) + + os.popen3 = __wrap_popen3 + + +def hexify(s): + return ' '.join(map(lambda x: '%02x' % ord(x), s)) + + +def get_class_logger(o): + return logging.getLogger( + '%s.%s' % (o.__class__.__module__, o.__class__.__name__)) + + +class NoopMasker(object): + """A masking object that has the same interface as RepeatedXorMasker but + returns the string passed in without any change. + """ + + def __init__(self): + pass + + def mask(self, s): + return s + + +class RepeatedXorMasker(object): + """A masking object that repeatedly applies XOR to the string given to the + mask method, using the masking bytes given to the constructor. The object + remembers the position in the masking bytes where the last mask call + ended, and resumes from that point on the next call.
+ """ + + def __init__(self, mask): + self._mask = map(ord, mask) + self._mask_size = len(self._mask) + self._count = 0 + + def mask(self, s): + result = array.array('B') + result.fromstring(s) + # Use temporary local variables to eliminate the cost to access + # attributes + count = self._count + mask = self._mask + mask_size = self._mask_size + for i in xrange(len(result)): + result[i] ^= mask[count] + count = (count + 1) % mask_size + self._count = count + + return result.tostring() + + +class DeflateRequest(object): + """A wrapper class for request object to intercept send and recv to perform + deflate compression and decompression transparently. + """ + + def __init__(self, request): + self._request = request + self.connection = DeflateConnection(request.connection) + + def __getattribute__(self, name): + if name in ('_request', 'connection'): + return object.__getattribute__(self, name) + return self._request.__getattribute__(name) + + def __setattr__(self, name, value): + if name in ('_request', 'connection'): + return object.__setattr__(self, name, value) + return self._request.__setattr__(name, value) + + +# By making wbits option negative, we can suppress CMF/FLG (2 octet) and +# ADLER32 (4 octet) fields of zlib so that we can use zlib module just as +# deflate library. DICTID won't be added as far as we don't set dictionary. +# LZ77 window of 32K will be used for both compression and decompression. +# For decompression, we can just use 32K to cover any windows size. For +# compression, we use 32K so receivers must use 32K. +# +# Compression level is Z_DEFAULT_COMPRESSION. We don't have to match level +# to decode. +# +# See zconf.h, deflate.cc, inflate.cc of zlib library, and zlibmodule.c of +# Python. See also RFC1950 (ZLIB 3.3). + + +class _Deflater(object): + + def __init__(self, window_bits): + self._logger = get_class_logger(self) + + self._compress = zlib.compressobj( + zlib.Z_DEFAULT_COMPRESSION, zlib.DEFLATED, -window_bits) + + def compress(self, bytes): + compressed_bytes = self._compress.compress(bytes) + self._logger.debug('Compress input %r', bytes) + self._logger.debug('Compress result %r', compressed_bytes) + return compressed_bytes + + def compress_and_flush(self, bytes): + compressed_bytes = self._compress.compress(bytes) + compressed_bytes += self._compress.flush(zlib.Z_SYNC_FLUSH) + self._logger.debug('Compress input %r', bytes) + self._logger.debug('Compress result %r', compressed_bytes) + return compressed_bytes + + def compress_and_finish(self, bytes): + compressed_bytes = self._compress.compress(bytes) + compressed_bytes += self._compress.flush(zlib.Z_FINISH) + self._logger.debug('Compress input %r', bytes) + self._logger.debug('Compress result %r', compressed_bytes) + return compressed_bytes + +class _Inflater(object): + + def __init__(self): + self._logger = get_class_logger(self) + + self._unconsumed = '' + + self.reset() + + def decompress(self, size): + if not (size == -1 or size > 0): + raise Exception('size must be -1 or positive') + + data = '' + + while True: + if size == -1: + data += self._decompress.decompress(self._unconsumed) + # See Python bug http://bugs.python.org/issue12050 to + # understand why the same code cannot be used for updating + # self._unconsumed for here and else block. + self._unconsumed = '' + else: + data += self._decompress.decompress( + self._unconsumed, size - len(data)) + self._unconsumed = self._decompress.unconsumed_tail + if self._decompress.unused_data: + # Encountered a last block (i.e. 
a block with BFINAL = 1) and + # found a new stream (unused_data). We cannot use the same + # zlib.Decompress object for the new stream. Create a new + # Decompress object to decompress the new one. + # + # It's fine to ignore unconsumed_tail if unused_data is not + # empty. + self._unconsumed = self._decompress.unused_data + self.reset() + if size >= 0 and len(data) == size: + # data is filled. Don't call decompress again. + break + else: + # Re-invoke Decompress.decompress to try to decompress all + # available bytes before invoking read, which blocks until + # any new byte is available. + continue + else: + # Here, since unused_data is empty, even if unconsumed_tail is + # not empty, bytes of the requested length are already in + # data. We don't have to "continue" here. + break + + if data: + self._logger.debug('Decompressed %r', data) + return data + + def append(self, data): + self._logger.debug('Appended %r', data) + self._unconsumed += data + + def reset(self): + self._logger.debug('Reset') + self._decompress = zlib.decompressobj(-zlib.MAX_WBITS) + + +# Compresses/decompresses given octets using the method introduced in RFC 1979. + + +class _RFC1979Deflater(object): + """A compressor class that applies DEFLATE to a given byte sequence and + flushes it using the algorithm described in section 2.1 of RFC 1979. + """ + + def __init__(self, window_bits, no_context_takeover): + self._deflater = None + if window_bits is None: + window_bits = zlib.MAX_WBITS + self._window_bits = window_bits + self._no_context_takeover = no_context_takeover + + def filter(self, bytes, flush=True, bfinal=False): + if self._deflater is None or (self._no_context_takeover and flush): + self._deflater = _Deflater(self._window_bits) + + if bfinal: + result = self._deflater.compress_and_finish(bytes) + # Add a padding block with BFINAL = 0 and BTYPE = 0. + result = result + chr(0) + self._deflater = None + return result + if flush: + # Strip the last 4 octets, which are the LEN and NLEN fields of a + # non-compressed block added for Z_SYNC_FLUSH. + return self._deflater.compress_and_flush(bytes)[:-4] + return self._deflater.compress(bytes) + + +class _RFC1979Inflater(object): + """A decompressor class for byte sequences compressed and flushed following + the algorithm described in section 2.1 of RFC 1979. + """ + + def __init__(self): + self._inflater = _Inflater() + + def filter(self, bytes): + # Restore the stripped LEN and NLEN fields of a non-compressed block + # added for Z_SYNC_FLUSH. + self._inflater.append(bytes + '\x00\x00\xff\xff') + return self._inflater.decompress(-1) + + +class DeflateSocket(object): + """A wrapper class for a socket object that intercepts send and recv to + perform deflate compression and decompression transparently. + """ + + # Size of the buffer passed to recv to receive compressed data. + _RECV_SIZE = 4096 + + def __init__(self, socket): + self._socket = socket + + self._logger = get_class_logger(self) + + self._deflater = _Deflater(zlib.MAX_WBITS) + self._inflater = _Inflater() + + def recv(self, size): + """Receives data from the socket specified at construction, up to the + given size. Once any data is available, returns it even if it's + smaller than the specified size. + """ +
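# ---------------------------------------------------------------------------
# Editor's sketch (annotation, not part of this patch): the framing trick used
# by _RFC1979Deflater/_RFC1979Inflater above, shown with plain zlib. A
# Z_SYNC_FLUSH ends the output with an empty stored block whose last four
# octets are 00 00 FF FF (LEN=0x0000, NLEN=0xFFFF); the sender strips them and
# the receiver re-appends them before inflating. Python 2 byte strings are
# assumed.
#
#     import zlib
#     compressor = zlib.compressobj(
#         zlib.Z_DEFAULT_COMPRESSION, zlib.DEFLATED, -zlib.MAX_WBITS)
#     chunk = (compressor.compress('hello') +
#              compressor.flush(zlib.Z_SYNC_FLUSH))
#     assert chunk.endswith('\x00\x00\xff\xff')
#     wire = chunk[:-4]                  # what filter() puts on the wire
#
#     decompressor = zlib.decompressobj(-zlib.MAX_WBITS)
#     assert decompressor.decompress(wire + '\x00\x00\xff\xff') == 'hello'
# ---------------------------------------------------------------------------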
+ # TODO(tyoshino): Allow call with size=0. It should block until any + # decompressed data is available. + if size <= 0: + raise Exception('Non-positive size passed') + while True: + data = self._inflater.decompress(size) + if len(data) != 0: + return data + + read_data = self._socket.recv(DeflateSocket._RECV_SIZE) + if not read_data: + return '' + self._inflater.append(read_data) + + def sendall(self, bytes): + self.send(bytes) + + def send(self, bytes): + self._socket.sendall(self._deflater.compress_and_flush(bytes)) + return len(bytes) + + +class DeflateConnection(object): + """A wrapper class for a request object that intercepts write and read to + perform deflate compression and decompression transparently. + """ + + def __init__(self, connection): + self._connection = connection + + self._logger = get_class_logger(self) + + self._deflater = _Deflater(zlib.MAX_WBITS) + self._inflater = _Inflater() + + def get_remote_addr(self): + return self._connection.remote_addr + remote_addr = property(get_remote_addr) + + def put_bytes(self, bytes): + self.write(bytes) + + def read(self, size=-1): + """Reads at most size bytes. Blocks until there's at least one byte + available. + """ + + # TODO(tyoshino): Allow call with size=0. + if not (size == -1 or size > 0): + raise Exception('size must be -1 or positive') + + data = '' + while True: + if size == -1: + data += self._inflater.decompress(-1) + else: + data += self._inflater.decompress(size - len(data)) + + if size >= 0 and len(data) != 0: + break + + # TODO(tyoshino): Make this read efficient by some workaround. + # + # In mod_python 3.0.3 and earlier, read blocks until length bytes + # have been read. We don't know the exact size to read while using + # deflate, so read byte-by-byte. + # + # _StandaloneRequest.read, which ultimately performs + # socket._fileobject.read, also blocks until length bytes have + # been read. + read_data = self._connection.read(1) + if not read_data: + break + self._inflater.append(read_data) + return data + + def write(self, bytes): + self._connection.write(self._deflater.compress_and_flush(bytes)) + + +def _is_ewouldblock_errno(error_number): + """Returns True iff error_number indicates that a receive operation would + block. To make this portable, we check the availability of each errno name + before comparing values. + """ + + for error_name in ['WSAEWOULDBLOCK', 'EWOULDBLOCK', 'EAGAIN']: + if (error_name in dir(errno) and + error_number == getattr(errno, error_name)): + return True + return False + + +def drain_received_data(raw_socket): + # Set the socket non-blocking. + original_timeout = raw_socket.gettimeout() + raw_socket.settimeout(0.0) + + drained_data = [] + + # Drain until the socket is closed or no data is immediately + # available for read. + while True: + try: + data = raw_socket.recv(1) + if not data: + break + drained_data.append(data) + except socket.error, e: + # e can be either a pair (errno, string) or just a string (or + # something else) telling what went wrong. We suppress only + # the errors that indicate that the socket would block. Those + # exceptions can be parsed as a pair (errno, string). + try: + error_number, message = e + except: + # Failed to parse socket.error. + raise e + + if _is_ewouldblock_errno(error_number): + break + else: + raise e +
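# ---------------------------------------------------------------------------
# Editor's sketch (annotation, not part of this patch): _is_ewouldblock_errno
# above tolerates platform-specific errno names: WSAEWOULDBLOCK exists only on
# Windows, while EWOULDBLOCK and EAGAIN exist on POSIX (often with the same
# value). A quick self-check:
#
#     import errno
#     assert _is_ewouldblock_errno(errno.EAGAIN)        # in the checked list
#     assert not _is_ewouldblock_errno(errno.EBADF)     # unrelated error
# ---------------------------------------------------------------------------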
+ # Restore the timeout value. + raw_socket.settimeout(original_timeout) + + return ''.join(drained_data) + + +# vi:sts=4 sw=4 et diff --git a/module/lib/new_collections.py b/module/lib/new_collections.py new file mode 100644 index 000000000..12d05b4b9 --- /dev/null +++ b/module/lib/new_collections.py @@ -0,0 +1,375 @@ +## {{{ http://code.activestate.com/recipes/576693/ (r9) +# Backport of the OrderedDict() class that runs on Python 2.4, 2.5, 2.6, 2.7 and PyPy. +# Passes Python 2.7's test suite and incorporates all the latest updates. + +try: + from thread import get_ident as _get_ident +except ImportError: + from dummy_thread import get_ident as _get_ident + +try: + from _abcoll import KeysView, ValuesView, ItemsView +except ImportError: + pass + + +class OrderedDict(dict): + 'Dictionary that remembers insertion order' + # An inherited dict maps keys to values. + # The inherited dict provides __getitem__, __len__, __contains__, and get. + # The remaining methods are order-aware. + # Big-O running times for all methods are the same as for regular dictionaries. + + # The internal self.__map dictionary maps keys to links in a doubly linked list. + # The circular doubly linked list starts and ends with a sentinel element. + # The sentinel element never gets deleted (this simplifies the algorithm). + # Each link is stored as a list of length three: [PREV, NEXT, KEY]. + + def __init__(self, *args, **kwds): + '''Initialize an ordered dictionary. Signature is the same as for + regular dictionaries, but keyword arguments are not recommended + because their insertion order is arbitrary. + + ''' + if len(args) > 1: + raise TypeError('expected at most 1 arguments, got %d' % len(args)) + try: + self.__root + except AttributeError: + self.__root = root = [] # sentinel node + root[:] = [root, root, None] + self.__map = {} + self.__update(*args, **kwds) + + def __setitem__(self, key, value, dict_setitem=dict.__setitem__): + 'od.__setitem__(i, y) <==> od[i]=y' + # Setting a new item creates a new link which goes at the end of the linked + # list, and the inherited dictionary is updated with the new key/value pair. + if key not in self: + root = self.__root + last = root[0] + last[1] = root[0] = self.__map[key] = [last, root, key] + dict_setitem(self, key, value) + + def __delitem__(self, key, dict_delitem=dict.__delitem__): + 'od.__delitem__(y) <==> del od[y]' + # Deleting an existing item uses self.__map to find the link which is + # then removed by updating the links in the predecessor and successor nodes. + dict_delitem(self, key) + link_prev, link_next, key = self.__map.pop(key) + link_prev[1] = link_next + link_next[0] = link_prev + + def __iter__(self): + 'od.__iter__() <==> iter(od)' + root = self.__root + curr = root[1] + while curr is not root: + yield curr[2] + curr = curr[1] + + def __reversed__(self): + 'od.__reversed__() <==> reversed(od)' + root = self.__root + curr = root[0] + while curr is not root: + yield curr[2] + curr = curr[0] + + def clear(self): + 'od.clear() -> None. Remove all items from od.' + try: + for node in self.__map.itervalues(): + del node[:] + root = self.__root + root[:] = [root, root, None] + self.__map.clear() + except AttributeError: + pass + dict.clear(self) + + def popitem(self, last=True): + '''od.popitem() -> (k, v), return and remove a (key, value) pair. + Pairs are returned in LIFO order if last is true or FIFO order if false.
+ + ''' + if not self: + raise KeyError('dictionary is empty') + root = self.__root + if last: + link = root[0] + link_prev = link[0] + link_prev[1] = root + root[0] = link_prev + else: + link = root[1] + link_next = link[1] + root[1] = link_next + link_next[0] = root + key = link[2] + del self.__map[key] + value = dict.pop(self, key) + return key, value + + # -- the following methods do not depend on the internal structure -- + + def keys(self): + 'od.keys() -> list of keys in od' + return list(self) + + def values(self): + 'od.values() -> list of values in od' + return [self[key] for key in self] + + def items(self): + 'od.items() -> list of (key, value) pairs in od' + return [(key, self[key]) for key in self] + + def iterkeys(self): + 'od.iterkeys() -> an iterator over the keys in od' + return iter(self) + + def itervalues(self): + 'od.itervalues() -> an iterator over the values in od' + for k in self: + yield self[k] + + def iteritems(self): + 'od.iteritems() -> an iterator over the (key, value) items in od' + for k in self: + yield (k, self[k]) + + def update(*args, **kwds): + '''od.update(E, **F) -> None. Update od from dict/iterable E and F. + + If E is a dict instance, does: for k in E: od[k] = E[k] + If E has a .keys() method, does: for k in E.keys(): od[k] = E[k] + Or if E is an iterable of items, does: for k, v in E: od[k] = v + In either case, this is followed by: for k, v in F.items(): od[k] = v + + ''' + if len(args) > 2: + raise TypeError('update() takes at most 2 positional ' + 'arguments (%d given)' % (len(args),)) + elif not args: + raise TypeError('update() takes at least 1 argument (0 given)') + self = args[0] + # Make progressively weaker assumptions about "other" + other = () + if len(args) == 2: + other = args[1] + if isinstance(other, dict): + for key in other: + self[key] = other[key] + elif hasattr(other, 'keys'): + for key in other.keys(): + self[key] = other[key] + else: + for key, value in other: + self[key] = value + for key, value in kwds.items(): + self[key] = value + + __update = update # let subclasses override update without breaking __init__ + + __marker = object() + + def pop(self, key, default=__marker): + '''od.pop(k[,d]) -> v, remove specified key and return the corresponding value. + If key is not found, d is returned if given, otherwise KeyError is raised. + + ''' + if key in self: + result = self[key] + del self[key] + return result + if default is self.__marker: + raise KeyError(key) + return default + + def setdefault(self, key, default=None): + 'od.setdefault(k[,d]) -> od.get(k,d), also set od[k]=d if k not in od' + if key in self: + return self[key] + self[key] = default + return default + + def __repr__(self, _repr_running={}): + 'od.__repr__() <==> repr(od)' + call_key = id(self), _get_ident() + if call_key in _repr_running: + return '...'
+ _repr_running[call_key] = 1 + try: + if not self: + return '%s()' % (self.__class__.__name__,) + return '%s(%r)' % (self.__class__.__name__, self.items()) + finally: + del _repr_running[call_key] + + def __reduce__(self): + 'Return state information for pickling' + items = [[k, self[k]] for k in self] + inst_dict = vars(self).copy() + for k in vars(OrderedDict()): + inst_dict.pop(k, None) + if inst_dict: + return (self.__class__, (items,), inst_dict) + return self.__class__, (items,) + + def copy(self): + 'od.copy() -> a shallow copy of od' + return self.__class__(self) + + @classmethod + def fromkeys(cls, iterable, value=None): + '''OD.fromkeys(S[, v]) -> New ordered dictionary with keys from S + and values equal to v (which defaults to None). + + ''' + d = cls() + for key in iterable: + d[key] = value + return d + + def __eq__(self, other): + '''od.__eq__(y) <==> od==y. Comparison to another OD is order-sensitive + while comparison to a regular mapping is order-insensitive. + + ''' + if isinstance(other, OrderedDict): + return len(self)==len(other) and self.items() == other.items() + return dict.__eq__(self, other) + + def __ne__(self, other): + return not self == other + + # -- the following methods are only used in Python 2.7 -- + + def viewkeys(self): + "od.viewkeys() -> a set-like object providing a view on od's keys" + return KeysView(self) + + def viewvalues(self): + "od.viewvalues() -> an object providing a view on od's values" + return ValuesView(self) + + def viewitems(self): + "od.viewitems() -> a set-like object providing a view on od's items" + return ItemsView(self) +## end of http://code.activestate.com/recipes/576693/ }}} + +## {{{ http://code.activestate.com/recipes/500261/ (r15) +from operator import itemgetter as _itemgetter +from keyword import iskeyword as _iskeyword +import sys as _sys + +def namedtuple(typename, field_names, verbose=False, rename=False): + """Returns a new subclass of tuple with named fields. + + >>> Point = namedtuple('Point', 'x y') + >>> Point.__doc__ # docstring for the new class + 'Point(x, y)' + >>> p = Point(11, y=22) # instantiate with positional args or keywords + >>> p[0] + p[1] # indexable like a plain tuple + 33 + >>> x, y = p # unpack like a regular tuple + >>> x, y + (11, 22) + >>> p.x + p.y # fields also accessible by name + 33 + >>> d = p._asdict() # convert to a dictionary + >>> d['x'] + 11 + >>> Point(**d) # convert from a dictionary + Point(x=11, y=22) + >>> p._replace(x=100) # _replace() is like str.replace() but targets named fields + Point(x=100, y=22) + + """ + + # Parse and validate the field names. Validation serves two purposes, + # generating informative error messages and preventing template injection attacks.
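# ---------------------------------------------------------------------------
# Editor's sketch (annotation, not part of this patch): what the validation
# below accepts and rejects, assuming this file is imported as
# new_collections:
#
#     from new_collections import namedtuple
#     namedtuple('T', ['a b'])    # ValueError: non-alphanumeric character
#     namedtuple('T', ['class'])  # ValueError: name is a keyword
#     namedtuple('T', ['1st'])    # ValueError: starts with a number
#
#     # With rename=True, offending names are replaced positionally instead:
#     P = namedtuple('P', ['class', 'x'], rename=True)
#     P._fields   # -> ('_0', 'x')
# ---------------------------------------------------------------------------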
+ if isinstance(field_names, basestring): + field_names = field_names.replace(',', ' ').split() # names separated by whitespace and/or commas + field_names = tuple(map(str, field_names)) + if rename: + names = list(field_names) + seen = set() + for i, name in enumerate(names): + if (not min(c.isalnum() or c=='_' for c in name) or _iskeyword(name) + or not name or name[0].isdigit() or name.startswith('_') + or name in seen): + names[i] = '_%d' % i + seen.add(name) + field_names = tuple(names) + for name in (typename,) + field_names: + if not min(c.isalnum() or c=='_' for c in name): + raise ValueError('Type names and field names can only contain alphanumeric characters and underscores: %r' % name) + if _iskeyword(name): + raise ValueError('Type names and field names cannot be a keyword: %r' % name) + if name[0].isdigit(): + raise ValueError('Type names and field names cannot start with a number: %r' % name) + seen_names = set() + for name in field_names: + if name.startswith('_') and not rename: + raise ValueError('Field names cannot start with an underscore: %r' % name) + if name in seen_names: + raise ValueError('Encountered duplicate field name: %r' % name) + seen_names.add(name) + + # Create and fill in the class template + numfields = len(field_names) + argtxt = repr(field_names).replace("'", "")[1:-1] # tuple repr without parens or quotes + reprtxt = ', '.join('%s=%%r' % name for name in field_names) + template = '''class %(typename)s(tuple): + '%(typename)s(%(argtxt)s)' \n + __slots__ = () \n + _fields = %(field_names)r \n + def __new__(_cls, %(argtxt)s): + return _tuple.__new__(_cls, (%(argtxt)s)) \n + @classmethod + def _make(cls, iterable, new=tuple.__new__, len=len): + 'Make a new %(typename)s object from a sequence or iterable' + result = new(cls, iterable) + if len(result) != %(numfields)d: + raise TypeError('Expected %(numfields)d arguments, got %%d' %% len(result)) + return result \n + def __repr__(self): + return '%(typename)s(%(reprtxt)s)' %% self \n + def _asdict(self): + 'Return a new dict which maps field names to their values' + return dict(zip(self._fields, self)) \n + def _replace(_self, **kwds): + 'Return a new %(typename)s object replacing specified fields with new values' + result = _self._make(map(kwds.pop, %(field_names)r, _self)) + if kwds: + raise ValueError('Got unexpected field names: %%r' %% kwds.keys()) + return result \n + def __getnewargs__(self): + return tuple(self) \n\n''' % locals() + for i, name in enumerate(field_names): + template += ' %s = _property(_itemgetter(%d))\n' % (name, i) + if verbose: + print template + + # Execute the template string in a temporary namespace + namespace = dict(_itemgetter=_itemgetter, __name__='namedtuple_%s' % typename, + _property=property, _tuple=tuple) + try: + exec template in namespace + except SyntaxError, e: + raise SyntaxError(e.message + ':\n' + template) + result = namespace[typename] + + # For pickling to work, the __module__ variable needs to be set to the frame + # where the named tuple is created. Bypass this step in environments where + # sys._getframe is not defined (Jython for example) or sys._getframe is not + # defined for arguments greater than 0 (IronPython).
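# ---------------------------------------------------------------------------
# Editor's sketch (annotation, not part of this patch): the __module__ fix-up
# below is what lets pickle find the generated class by module and name;
# together with __getnewargs__ from the template, round-tripping works:
#
#     import pickle
#     Point = namedtuple('Point', 'x y')   # created at module level
#     p = Point(11, 22)
#     assert pickle.loads(pickle.dumps(p)) == p
# ---------------------------------------------------------------------------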
+ try: + result.__module__ = _sys._getframe(1).f_globals.get('__name__', '__main__') + except (AttributeError, ValueError): + pass + + return result +## end of http://code.activestate.com/recipes/500261/ }}} diff --git a/module/lib/thrift/TSCons.py b/module/lib/thrift/TSCons.py deleted file mode 100644 index 24046256c..000000000 --- a/module/lib/thrift/TSCons.py +++ /dev/null @@ -1,33 +0,0 @@ -# -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. -# - -from os import path -from SCons.Builder import Builder - -def scons_env(env, add=''): - opath = path.dirname(path.abspath('$TARGET')) - lstr = 'thrift --gen cpp -o ' + opath + ' ' + add + ' $SOURCE' - cppbuild = Builder(action = lstr) - env.Append(BUILDERS = {'ThriftCpp' : cppbuild}) - -def gen_cpp(env, dir, file): - scons_env(env) - suffixes = ['_types.h', '_types.cpp'] - targets = map(lambda s: 'gen-cpp/' + file + s, suffixes) - return env.ThriftCpp(targets, dir+file+'.thrift') diff --git a/module/lib/thrift/TSerialization.py b/module/lib/thrift/TSerialization.py deleted file mode 100644 index b19f98aa8..000000000 --- a/module/lib/thrift/TSerialization.py +++ /dev/null @@ -1,34 +0,0 @@ -# -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. -# - -from protocol import TBinaryProtocol -from transport import TTransport - -def serialize(thrift_object, protocol_factory = TBinaryProtocol.TBinaryProtocolFactory()): - transport = TTransport.TMemoryBuffer() - protocol = protocol_factory.getProtocol(transport) - thrift_object.write(protocol) - return transport.getvalue() - -def deserialize(base, buf, protocol_factory = TBinaryProtocol.TBinaryProtocolFactory()): - transport = TTransport.TMemoryBuffer(buf) - protocol = protocol_factory.getProtocol(transport) - base.read(protocol) - return base - diff --git a/module/lib/thrift/Thrift.py b/module/lib/thrift/Thrift.py deleted file mode 100644 index 1d271fcff..000000000 --- a/module/lib/thrift/Thrift.py +++ /dev/null @@ -1,154 +0,0 @@ -# -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. 
See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. -# - -import sys - -class TType: - STOP = 0 - VOID = 1 - BOOL = 2 - BYTE = 3 - I08 = 3 - DOUBLE = 4 - I16 = 6 - I32 = 8 - I64 = 10 - STRING = 11 - UTF7 = 11 - STRUCT = 12 - MAP = 13 - SET = 14 - LIST = 15 - UTF8 = 16 - UTF16 = 17 - - _VALUES_TO_NAMES = ( 'STOP', - 'VOID', - 'BOOL', - 'BYTE', - 'DOUBLE', - None, - 'I16', - None, - 'I32', - None, - 'I64', - 'STRING', - 'STRUCT', - 'MAP', - 'SET', - 'LIST', - 'UTF8', - 'UTF16' ) - -class TMessageType: - CALL = 1 - REPLY = 2 - EXCEPTION = 3 - ONEWAY = 4 - -class TProcessor: - - """Base class for procsessor, which works on two streams.""" - - def process(iprot, oprot): - pass - -class TException(Exception): - - """Base class for all thrift exceptions.""" - - # BaseException.message is deprecated in Python v[2.6,3.0) - if (2,6,0) <= sys.version_info < (3,0): - def _get_message(self): - return self._message - def _set_message(self, message): - self._message = message - message = property(_get_message, _set_message) - - def __init__(self, message=None): - Exception.__init__(self, message) - self.message = message - -class TApplicationException(TException): - - """Application level thrift exceptions.""" - - UNKNOWN = 0 - UNKNOWN_METHOD = 1 - INVALID_MESSAGE_TYPE = 2 - WRONG_METHOD_NAME = 3 - BAD_SEQUENCE_ID = 4 - MISSING_RESULT = 5 - INTERNAL_ERROR = 6 - PROTOCOL_ERROR = 7 - - def __init__(self, type=UNKNOWN, message=None): - TException.__init__(self, message) - self.type = type - - def __str__(self): - if self.message: - return self.message - elif self.type == self.UNKNOWN_METHOD: - return 'Unknown method' - elif self.type == self.INVALID_MESSAGE_TYPE: - return 'Invalid message type' - elif self.type == self.WRONG_METHOD_NAME: - return 'Wrong method name' - elif self.type == self.BAD_SEQUENCE_ID: - return 'Bad sequence ID' - elif self.type == self.MISSING_RESULT: - return 'Missing result' - else: - return 'Default (unknown) TApplicationException' - - def read(self, iprot): - iprot.readStructBegin() - while True: - (fname, ftype, fid) = iprot.readFieldBegin() - if ftype == TType.STOP: - break - if fid == 1: - if ftype == TType.STRING: - self.message = iprot.readString(); - else: - iprot.skip(ftype) - elif fid == 2: - if ftype == TType.I32: - self.type = iprot.readI32(); - else: - iprot.skip(ftype) - else: - iprot.skip(ftype) - iprot.readFieldEnd() - iprot.readStructEnd() - - def write(self, oprot): - oprot.writeStructBegin('TApplicationException') - if self.message != None: - oprot.writeFieldBegin('message', TType.STRING, 1) - oprot.writeString(self.message) - oprot.writeFieldEnd() - if self.type != None: - oprot.writeFieldBegin('type', TType.I32, 2) - oprot.writeI32(self.type) - oprot.writeFieldEnd() - oprot.writeFieldStop() - oprot.writeStructEnd() diff --git a/module/lib/thrift/__init__.py b/module/lib/thrift/__init__.py deleted file mode 100644 index 
48d659c40..000000000 --- a/module/lib/thrift/__init__.py +++ /dev/null @@ -1,20 +0,0 @@ -# -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. -# - -__all__ = ['Thrift', 'TSCons'] diff --git a/module/lib/thrift/protocol/TBase.py b/module/lib/thrift/protocol/TBase.py deleted file mode 100644 index e675c7dc0..000000000 --- a/module/lib/thrift/protocol/TBase.py +++ /dev/null @@ -1,72 +0,0 @@ -# -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. -# - -from thrift.Thrift import * -from thrift.protocol import TBinaryProtocol -from thrift.transport import TTransport - -try: - from thrift.protocol import fastbinary -except: - fastbinary = None - -class TBase(object): - __slots__ = [] - - def __repr__(self): - L = ['%s=%r' % (key, getattr(self, key)) - for key in self.__slots__ ] - return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) - - def __eq__(self, other): - if not isinstance(other, self.__class__): - return False - for attr in self.__slots__: - my_val = getattr(self, attr) - other_val = getattr(other, attr) - if my_val != other_val: - return False - return True - - def __ne__(self, other): - return not (self == other) - - def read(self, iprot): - if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: - fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) - return - iprot.readStruct(self, self.thrift_spec) - - def write(self, oprot): - if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: - oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) - return - oprot.writeStruct(self, self.thrift_spec) - -class TExceptionBase(Exception): - # old style class so python2.4 can raise exceptions derived from this - # This can't inherit from TBase because of that limitation. 
- __slots__ = [] - - __repr__ = TBase.__repr__.im_func - __eq__ = TBase.__eq__.im_func - __ne__ = TBase.__ne__.im_func - read = TBase.read.im_func - write = TBase.write.im_func - diff --git a/module/lib/thrift/protocol/TBinaryProtocol.py b/module/lib/thrift/protocol/TBinaryProtocol.py deleted file mode 100644 index 50c6aa896..000000000 --- a/module/lib/thrift/protocol/TBinaryProtocol.py +++ /dev/null @@ -1,259 +0,0 @@ -# -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. -# - -from TProtocol import * -from struct import pack, unpack - -class TBinaryProtocol(TProtocolBase): - - """Binary implementation of the Thrift protocol driver.""" - - # NastyHaxx. Python 2.4+ on 32-bit machines forces hex constants to be - # positive, converting this into a long. If we hardcode the int value - # instead it'll stay in 32 bit-land. - - # VERSION_MASK = 0xffff0000 - VERSION_MASK = -65536 - - # VERSION_1 = 0x80010000 - VERSION_1 = -2147418112 - - TYPE_MASK = 0x000000ff - - def __init__(self, trans, strictRead=False, strictWrite=True): - TProtocolBase.__init__(self, trans) - self.strictRead = strictRead - self.strictWrite = strictWrite - - def writeMessageBegin(self, name, type, seqid): - if self.strictWrite: - self.writeI32(TBinaryProtocol.VERSION_1 | type) - self.writeString(name) - self.writeI32(seqid) - else: - self.writeString(name) - self.writeByte(type) - self.writeI32(seqid) - - def writeMessageEnd(self): - pass - - def writeStructBegin(self, name): - pass - - def writeStructEnd(self): - pass - - def writeFieldBegin(self, name, type, id): - self.writeByte(type) - self.writeI16(id) - - def writeFieldEnd(self): - pass - - def writeFieldStop(self): - self.writeByte(TType.STOP); - - def writeMapBegin(self, ktype, vtype, size): - self.writeByte(ktype) - self.writeByte(vtype) - self.writeI32(size) - - def writeMapEnd(self): - pass - - def writeListBegin(self, etype, size): - self.writeByte(etype) - self.writeI32(size) - - def writeListEnd(self): - pass - - def writeSetBegin(self, etype, size): - self.writeByte(etype) - self.writeI32(size) - - def writeSetEnd(self): - pass - - def writeBool(self, bool): - if bool: - self.writeByte(1) - else: - self.writeByte(0) - - def writeByte(self, byte): - buff = pack("!b", byte) - self.trans.write(buff) - - def writeI16(self, i16): - buff = pack("!h", i16) - self.trans.write(buff) - - def writeI32(self, i32): - buff = pack("!i", i32) - self.trans.write(buff) - - def writeI64(self, i64): - buff = pack("!q", i64) - self.trans.write(buff) - - def writeDouble(self, dub): - buff = pack("!d", dub) - self.trans.write(buff) - - def writeString(self, str): - self.writeI32(len(str)) - self.trans.write(str) - - def readMessageBegin(self): - sz = self.readI32() - if sz < 0: - version = sz & TBinaryProtocol.VERSION_MASK - if version != 
TBinaryProtocol.VERSION_1: - raise TProtocolException(type=TProtocolException.BAD_VERSION, message='Bad version in readMessageBegin: %d' % (sz)) - type = sz & TBinaryProtocol.TYPE_MASK - name = self.readString() - seqid = self.readI32() - else: - if self.strictRead: - raise TProtocolException(type=TProtocolException.BAD_VERSION, message='No protocol version header') - name = self.trans.readAll(sz) - type = self.readByte() - seqid = self.readI32() - return (name, type, seqid) - - def readMessageEnd(self): - pass - - def readStructBegin(self): - pass - - def readStructEnd(self): - pass - - def readFieldBegin(self): - type = self.readByte() - if type == TType.STOP: - return (None, type, 0) - id = self.readI16() - return (None, type, id) - - def readFieldEnd(self): - pass - - def readMapBegin(self): - ktype = self.readByte() - vtype = self.readByte() - size = self.readI32() - return (ktype, vtype, size) - - def readMapEnd(self): - pass - - def readListBegin(self): - etype = self.readByte() - size = self.readI32() - return (etype, size) - - def readListEnd(self): - pass - - def readSetBegin(self): - etype = self.readByte() - size = self.readI32() - return (etype, size) - - def readSetEnd(self): - pass - - def readBool(self): - byte = self.readByte() - if byte == 0: - return False - return True - - def readByte(self): - buff = self.trans.readAll(1) - val, = unpack('!b', buff) - return val - - def readI16(self): - buff = self.trans.readAll(2) - val, = unpack('!h', buff) - return val - - def readI32(self): - buff = self.trans.readAll(4) - val, = unpack('!i', buff) - return val - - def readI64(self): - buff = self.trans.readAll(8) - val, = unpack('!q', buff) - return val - - def readDouble(self): - buff = self.trans.readAll(8) - val, = unpack('!d', buff) - return val - - def readString(self): - len = self.readI32() - str = self.trans.readAll(len) - return str - - -class TBinaryProtocolFactory: - def __init__(self, strictRead=False, strictWrite=True): - self.strictRead = strictRead - self.strictWrite = strictWrite - - def getProtocol(self, trans): - prot = TBinaryProtocol(trans, self.strictRead, self.strictWrite) - return prot - - -class TBinaryProtocolAccelerated(TBinaryProtocol): - - """C-Accelerated version of TBinaryProtocol. - - This class does not override any of TBinaryProtocol's methods, - but the generated code recognizes it directly and will call into - our C module to do the encoding, bypassing this object entirely. - We inherit from TBinaryProtocol so that the normal TBinaryProtocol - encoding can happen if the fastbinary module doesn't work for some - reason. (TODO(dreiss): Make this happen sanely in more cases.) - - In order to take advantage of the C module, just use - TBinaryProtocolAccelerated instead of TBinaryProtocol. - - NOTE: This code was contributed by an external developer. - The internal Thrift team has reviewed and tested it, - but we cannot guarantee that it is production-ready. - Please feel free to report bugs and/or success stories - to the public mailing list. - """ - - pass - - -class TBinaryProtocolAcceleratedFactory: - def getProtocol(self, trans): - return TBinaryProtocolAccelerated(trans) diff --git a/module/lib/thrift/protocol/TCompactProtocol.py b/module/lib/thrift/protocol/TCompactProtocol.py deleted file mode 100644 index 016a33171..000000000 --- a/module/lib/thrift/protocol/TCompactProtocol.py +++ /dev/null @@ -1,395 +0,0 @@ -# -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. 
See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. -# - -from TProtocol import * -from struct import pack, unpack - -__all__ = ['TCompactProtocol', 'TCompactProtocolFactory'] - -CLEAR = 0 -FIELD_WRITE = 1 -VALUE_WRITE = 2 -CONTAINER_WRITE = 3 -BOOL_WRITE = 4 -FIELD_READ = 5 -CONTAINER_READ = 6 -VALUE_READ = 7 -BOOL_READ = 8 - -def make_helper(v_from, container): - def helper(func): - def nested(self, *args, **kwargs): - assert self.state in (v_from, container), (self.state, v_from, container) - return func(self, *args, **kwargs) - return nested - return helper -writer = make_helper(VALUE_WRITE, CONTAINER_WRITE) -reader = make_helper(VALUE_READ, CONTAINER_READ) - -def makeZigZag(n, bits): - return (n << 1) ^ (n >> (bits - 1)) - -def fromZigZag(n): - return (n >> 1) ^ -(n & 1) - -def writeVarint(trans, n): - out = [] - while True: - if n & ~0x7f == 0: - out.append(n) - break - else: - out.append((n & 0xff) | 0x80) - n = n >> 7 - trans.write(''.join(map(chr, out))) - -def readVarint(trans): - result = 0 - shift = 0 - while True: - x = trans.readAll(1) - byte = ord(x) - result |= (byte & 0x7f) << shift - if byte >> 7 == 0: - return result - shift += 7 - -class CompactType: - STOP = 0x00 - TRUE = 0x01 - FALSE = 0x02 - BYTE = 0x03 - I16 = 0x04 - I32 = 0x05 - I64 = 0x06 - DOUBLE = 0x07 - BINARY = 0x08 - LIST = 0x09 - SET = 0x0A - MAP = 0x0B - STRUCT = 0x0C - -CTYPES = {TType.STOP: CompactType.STOP, - TType.BOOL: CompactType.TRUE, # used for collection - TType.BYTE: CompactType.BYTE, - TType.I16: CompactType.I16, - TType.I32: CompactType.I32, - TType.I64: CompactType.I64, - TType.DOUBLE: CompactType.DOUBLE, - TType.STRING: CompactType.BINARY, - TType.STRUCT: CompactType.STRUCT, - TType.LIST: CompactType.LIST, - TType.SET: CompactType.SET, - TType.MAP: CompactType.MAP - } - -TTYPES = {} -for k, v in CTYPES.items(): - TTYPES[v] = k -TTYPES[CompactType.FALSE] = TType.BOOL -del k -del v - -class TCompactProtocol(TProtocolBase): - "Compact implementation of the Thrift protocol driver." 
- - PROTOCOL_ID = 0x82 - VERSION = 1 - VERSION_MASK = 0x1f - TYPE_MASK = 0xe0 - TYPE_SHIFT_AMOUNT = 5 - - def __init__(self, trans): - TProtocolBase.__init__(self, trans) - self.state = CLEAR - self.__last_fid = 0 - self.__bool_fid = None - self.__bool_value = None - self.__structs = [] - self.__containers = [] - - def __writeVarint(self, n): - writeVarint(self.trans, n) - - def writeMessageBegin(self, name, type, seqid): - assert self.state == CLEAR - self.__writeUByte(self.PROTOCOL_ID) - self.__writeUByte(self.VERSION | (type << self.TYPE_SHIFT_AMOUNT)) - self.__writeVarint(seqid) - self.__writeString(name) - self.state = VALUE_WRITE - - def writeMessageEnd(self): - assert self.state == VALUE_WRITE - self.state = CLEAR - - def writeStructBegin(self, name): - assert self.state in (CLEAR, CONTAINER_WRITE, VALUE_WRITE), self.state - self.__structs.append((self.state, self.__last_fid)) - self.state = FIELD_WRITE - self.__last_fid = 0 - - def writeStructEnd(self): - assert self.state == FIELD_WRITE - self.state, self.__last_fid = self.__structs.pop() - - def writeFieldStop(self): - self.__writeByte(0) - - def __writeFieldHeader(self, type, fid): - delta = fid - self.__last_fid - if 0 < delta <= 15: - self.__writeUByte(delta << 4 | type) - else: - self.__writeByte(type) - self.__writeI16(fid) - self.__last_fid = fid - - def writeFieldBegin(self, name, type, fid): - assert self.state == FIELD_WRITE, self.state - if type == TType.BOOL: - self.state = BOOL_WRITE - self.__bool_fid = fid - else: - self.state = VALUE_WRITE - self.__writeFieldHeader(CTYPES[type], fid) - - def writeFieldEnd(self): - assert self.state in (VALUE_WRITE, BOOL_WRITE), self.state - self.state = FIELD_WRITE - - def __writeUByte(self, byte): - self.trans.write(pack('!B', byte)) - - def __writeByte(self, byte): - self.trans.write(pack('!b', byte)) - - def __writeI16(self, i16): - self.__writeVarint(makeZigZag(i16, 16)) - - def __writeSize(self, i32): - self.__writeVarint(i32) - - def writeCollectionBegin(self, etype, size): - assert self.state in (VALUE_WRITE, CONTAINER_WRITE), self.state - if size <= 14: - self.__writeUByte(size << 4 | CTYPES[etype]) - else: - self.__writeUByte(0xf0 | CTYPES[etype]) - self.__writeSize(size) - self.__containers.append(self.state) - self.state = CONTAINER_WRITE - writeSetBegin = writeCollectionBegin - writeListBegin = writeCollectionBegin - - def writeMapBegin(self, ktype, vtype, size): - assert self.state in (VALUE_WRITE, CONTAINER_WRITE), self.state - if size == 0: - self.__writeByte(0) - else: - self.__writeSize(size) - self.__writeUByte(CTYPES[ktype] << 4 | CTYPES[vtype]) - self.__containers.append(self.state) - self.state = CONTAINER_WRITE - - def writeCollectionEnd(self): - assert self.state == CONTAINER_WRITE, self.state - self.state = self.__containers.pop() - writeMapEnd = writeCollectionEnd - writeSetEnd = writeCollectionEnd - writeListEnd = writeCollectionEnd - - def writeBool(self, bool): - if self.state == BOOL_WRITE: - if bool: - ctype = CompactType.TRUE - else: - ctype = CompactType.FALSE - self.__writeFieldHeader(ctype, self.__bool_fid) - elif self.state == CONTAINER_WRITE: - if bool: - self.__writeByte(CompactType.TRUE) - else: - self.__writeByte(CompactType.FALSE) - else: - raise AssertionError, "Invalid state in compact protocol" - - writeByte = writer(__writeByte) - writeI16 = writer(__writeI16) - - @writer - def writeI32(self, i32): - self.__writeVarint(makeZigZag(i32, 32)) - - @writer - def writeI64(self, i64): - self.__writeVarint(makeZigZag(i64, 64)) - - @writer - def 
writeDouble(self, dub): - self.trans.write(pack('!d', dub)) - - def __writeString(self, s): - self.__writeSize(len(s)) - self.trans.write(s) - writeString = writer(__writeString) - - def readFieldBegin(self): - assert self.state == FIELD_READ, self.state - type = self.__readUByte() - if type & 0x0f == TType.STOP: - return (None, 0, 0) - delta = type >> 4 - if delta == 0: - fid = self.__readI16() - else: - fid = self.__last_fid + delta - self.__last_fid = fid - type = type & 0x0f - if type == CompactType.TRUE: - self.state = BOOL_READ - self.__bool_value = True - elif type == CompactType.FALSE: - self.state = BOOL_READ - self.__bool_value = False - else: - self.state = VALUE_READ - return (None, self.__getTType(type), fid) - - def readFieldEnd(self): - assert self.state in (VALUE_READ, BOOL_READ), self.state - self.state = FIELD_READ - - def __readUByte(self): - result, = unpack('!B', self.trans.readAll(1)) - return result - - def __readByte(self): - result, = unpack('!b', self.trans.readAll(1)) - return result - - def __readVarint(self): - return readVarint(self.trans) - - def __readZigZag(self): - return fromZigZag(self.__readVarint()) - - def __readSize(self): - result = self.__readVarint() - if result < 0: - raise TException("Length < 0") - return result - - def readMessageBegin(self): - assert self.state == CLEAR - proto_id = self.__readUByte() - if proto_id != self.PROTOCOL_ID: - raise TProtocolException(TProtocolException.BAD_VERSION, - 'Bad protocol id in the message: %d' % proto_id) - ver_type = self.__readUByte() - type = (ver_type & self.TYPE_MASK) >> self.TYPE_SHIFT_AMOUNT - version = ver_type & self.VERSION_MASK - if version != self.VERSION: - raise TProtocolException(TProtocolException.BAD_VERSION, - 'Bad version: %d (expect %d)' % (version, self.VERSION)) - seqid = self.__readVarint() - name = self.__readString() - return (name, type, seqid) - - def readMessageEnd(self): - assert self.state == CLEAR - assert len(self.__structs) == 0 - - def readStructBegin(self): - assert self.state in (CLEAR, CONTAINER_READ, VALUE_READ), self.state - self.__structs.append((self.state, self.__last_fid)) - self.state = FIELD_READ - self.__last_fid = 0 - - def readStructEnd(self): - assert self.state == FIELD_READ - self.state, self.__last_fid = self.__structs.pop() - - def readCollectionBegin(self): - assert self.state in (VALUE_READ, CONTAINER_READ), self.state - size_type = self.__readUByte() - size = size_type >> 4 - type = self.__getTType(size_type) - if size == 15: - size = self.__readSize() - self.__containers.append(self.state) - self.state = CONTAINER_READ - return type, size - readSetBegin = readCollectionBegin - readListBegin = readCollectionBegin - - def readMapBegin(self): - assert self.state in (VALUE_READ, CONTAINER_READ), self.state - size = self.__readSize() - types = 0 - if size > 0: - types = self.__readUByte() - vtype = self.__getTType(types) - ktype = self.__getTType(types >> 4) - self.__containers.append(self.state) - self.state = CONTAINER_READ - return (ktype, vtype, size) - - def readCollectionEnd(self): - assert self.state == CONTAINER_READ, self.state - self.state = self.__containers.pop() - readSetEnd = readCollectionEnd - readListEnd = readCollectionEnd - readMapEnd = readCollectionEnd - - def readBool(self): - if self.state == BOOL_READ: - return self.__bool_value == CompactType.TRUE - elif self.state == CONTAINER_READ: - return self.__readByte() == CompactType.TRUE - else: - raise AssertionError, "Invalid state in compact protocol: %d" % self.state - - readByte = 
reader(__readByte) - __readI16 = __readZigZag - readI16 = reader(__readZigZag) - readI32 = reader(__readZigZag) - readI64 = reader(__readZigZag) - - @reader - def readDouble(self): - buff = self.trans.readAll(8) - val, = unpack('!d', buff) - return val - - def __readString(self): - len = self.__readSize() - return self.trans.readAll(len) - readString = reader(__readString) - - def __getTType(self, byte): - return TTYPES[byte & 0x0f] - - -class TCompactProtocolFactory: - def __init__(self): - pass - - def getProtocol(self, trans): - return TCompactProtocol(trans) diff --git a/module/lib/thrift/protocol/TProtocol.py b/module/lib/thrift/protocol/TProtocol.py deleted file mode 100644 index 7338ff68a..000000000 --- a/module/lib/thrift/protocol/TProtocol.py +++ /dev/null @@ -1,404 +0,0 @@ -# -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. -# - -from thrift.Thrift import * - -class TProtocolException(TException): - - """Custom Protocol Exception class""" - - UNKNOWN = 0 - INVALID_DATA = 1 - NEGATIVE_SIZE = 2 - SIZE_LIMIT = 3 - BAD_VERSION = 4 - - def __init__(self, type=UNKNOWN, message=None): - TException.__init__(self, message) - self.type = type - -class TProtocolBase: - - """Base class for Thrift protocol driver.""" - - def __init__(self, trans): - self.trans = trans - - def writeMessageBegin(self, name, type, seqid): - pass - - def writeMessageEnd(self): - pass - - def writeStructBegin(self, name): - pass - - def writeStructEnd(self): - pass - - def writeFieldBegin(self, name, type, id): - pass - - def writeFieldEnd(self): - pass - - def writeFieldStop(self): - pass - - def writeMapBegin(self, ktype, vtype, size): - pass - - def writeMapEnd(self): - pass - - def writeListBegin(self, etype, size): - pass - - def writeListEnd(self): - pass - - def writeSetBegin(self, etype, size): - pass - - def writeSetEnd(self): - pass - - def writeBool(self, bool): - pass - - def writeByte(self, byte): - pass - - def writeI16(self, i16): - pass - - def writeI32(self, i32): - pass - - def writeI64(self, i64): - pass - - def writeDouble(self, dub): - pass - - def writeString(self, str): - pass - - def readMessageBegin(self): - pass - - def readMessageEnd(self): - pass - - def readStructBegin(self): - pass - - def readStructEnd(self): - pass - - def readFieldBegin(self): - pass - - def readFieldEnd(self): - pass - - def readMapBegin(self): - pass - - def readMapEnd(self): - pass - - def readListBegin(self): - pass - - def readListEnd(self): - pass - - def readSetBegin(self): - pass - - def readSetEnd(self): - pass - - def readBool(self): - pass - - def readByte(self): - pass - - def readI16(self): - pass - - def readI32(self): - pass - - def readI64(self): - pass - - def readDouble(self): - pass - - def readString(self): - pass - - def skip(self, type): - if type == TType.STOP: 
- return - elif type == TType.BOOL: - self.readBool() - elif type == TType.BYTE: - self.readByte() - elif type == TType.I16: - self.readI16() - elif type == TType.I32: - self.readI32() - elif type == TType.I64: - self.readI64() - elif type == TType.DOUBLE: - self.readDouble() - elif type == TType.STRING: - self.readString() - elif type == TType.STRUCT: - name = self.readStructBegin() - while True: - (name, type, id) = self.readFieldBegin() - if type == TType.STOP: - break - self.skip(type) - self.readFieldEnd() - self.readStructEnd() - elif type == TType.MAP: - (ktype, vtype, size) = self.readMapBegin() - for i in range(size): - self.skip(ktype) - self.skip(vtype) - self.readMapEnd() - elif type == TType.SET: - (etype, size) = self.readSetBegin() - for i in range(size): - self.skip(etype) - self.readSetEnd() - elif type == TType.LIST: - (etype, size) = self.readListBegin() - for i in range(size): - self.skip(etype) - self.readListEnd() - - # tuple of: ( 'reader method' name, is_container boolean, 'writer_method' name ) - _TTYPE_HANDLERS = ( - (None, None, False), # 0 == TType,STOP - (None, None, False), # 1 == TType.VOID # TODO: handle void? - ('readBool', 'writeBool', False), # 2 == TType.BOOL - ('readByte', 'writeByte', False), # 3 == TType.BYTE and I08 - ('readDouble', 'writeDouble', False), # 4 == TType.DOUBLE - (None, None, False), # 5, undefined - ('readI16', 'writeI16', False), # 6 == TType.I16 - (None, None, False), # 7, undefined - ('readI32', 'writeI32', False), # 8 == TType.I32 - (None, None, False), # 9, undefined - ('readI64', 'writeI64', False), # 10 == TType.I64 - ('readString', 'writeString', False), # 11 == TType.STRING and UTF7 - ('readContainerStruct', 'writeContainerStruct', True), # 12 == TType.STRUCT - ('readContainerMap', 'writeContainerMap', True), # 13 == TType.MAP - ('readContainerSet', 'writeContainerSet', True), # 14 == TType.SET - ('readContainerList', 'writeContainerList', True), # 15 == TType.LIST - (None, None, False), # 16 == TType.UTF8 # TODO: handle utf8 types? - (None, None, False)# 17 == TType.UTF16 # TODO: handle utf16 types? 
- ) - - def readFieldByTType(self, ttype, spec): - try: - (r_handler, w_handler, is_container) = self._TTYPE_HANDLERS[ttype] - except IndexError: - raise TProtocolException(type=TProtocolException.INVALID_DATA, - message='Invalid field type %d' % (ttype)) - if r_handler is None: - raise TProtocolException(type=TProtocolException.INVALID_DATA, - message='Invalid field type %d' % (ttype)) - reader = getattr(self, r_handler) - if not is_container: - return reader() - return reader(spec) - - def readContainerList(self, spec): - results = [] - ttype, tspec = spec[0], spec[1] - r_handler = self._TTYPE_HANDLERS[ttype][0] - reader = getattr(self, r_handler) - (list_type, list_len) = self.readListBegin() - if tspec is None: - # list values are simple types - for idx in xrange(list_len): - results.append(reader()) - else: - # this is like an inlined readFieldByTType - container_reader = self._TTYPE_HANDLERS[list_type][0] - val_reader = getattr(self, container_reader) - for idx in xrange(list_len): - val = val_reader(tspec) - results.append(val) - self.readListEnd() - return results - - def readContainerSet(self, spec): - results = set() - ttype, tspec = spec[0], spec[1] - r_handler = self._TTYPE_HANDLERS[ttype][0] - reader = getattr(self, r_handler) - (set_type, set_len) = self.readSetBegin() - if tspec is None: - # set members are simple types - for idx in xrange(set_len): - results.add(reader()) - else: - container_reader = self._TTYPE_HANDLERS[set_type][0] - val_reader = getattr(self, container_reader) - for idx in xrange(set_len): - results.add(val_reader(tspec)) - self.readSetEnd() - return results - - def readContainerStruct(self, spec): - (obj_class, obj_spec) = spec - obj = obj_class() - obj.read(self) - return obj - - def readContainerMap(self, spec): - results = dict() - key_ttype, key_spec = spec[0], spec[1] - val_ttype, val_spec = spec[2], spec[3] - (map_ktype, map_vtype, map_len) = self.readMapBegin() - # TODO: compare types we just decoded with thrift_spec and abort/skip if types disagree - key_reader = getattr(self, self._TTYPE_HANDLERS[key_ttype][0]) - val_reader = getattr(self, self._TTYPE_HANDLERS[val_ttype][0]) - # list values are simple types - for idx in xrange(map_len): - if key_spec is None: - k_val = key_reader() - else: - k_val = self.readFieldByTType(key_ttype, key_spec) - if val_spec is None: - v_val = val_reader() - else: - v_val = self.readFieldByTType(val_ttype, val_spec) - # this raises a TypeError with unhashable keys types. i.e. 
d=dict(); d[[0,1]] = 2 fails - results[k_val] = v_val - self.readMapEnd() - return results - - def readStruct(self, obj, thrift_spec): - self.readStructBegin() - while True: - (fname, ftype, fid) = self.readFieldBegin() - if ftype == TType.STOP: - break - try: - field = thrift_spec[fid] - except IndexError: - self.skip(ftype) - else: - if field is not None and ftype == field[1]: - fname = field[2] - fspec = field[3] - val = self.readFieldByTType(ftype, fspec) - setattr(obj, fname, val) - else: - self.skip(ftype) - self.readFieldEnd() - self.readStructEnd() - - def writeContainerStruct(self, val, spec): - val.write(self) - - def writeContainerList(self, val, spec): - self.writeListBegin(spec[0], len(val)) - r_handler, w_handler, is_container = self._TTYPE_HANDLERS[spec[0]] - e_writer = getattr(self, w_handler) - if not is_container: - for elem in val: - e_writer(elem) - else: - for elem in val: - e_writer(elem, spec[1]) - self.writeListEnd() - - def writeContainerSet(self, val, spec): - self.writeSetBegin(spec[0], len(val)) - r_handler, w_handler, is_container = self._TTYPE_HANDLERS[spec[0]] - e_writer = getattr(self, w_handler) - if not is_container: - for elem in val: - e_writer(elem) - else: - for elem in val: - e_writer(elem, spec[1]) - self.writeSetEnd() - - def writeContainerMap(self, val, spec): - k_type = spec[0] - v_type = spec[2] - ignore, ktype_name, k_is_container = self._TTYPE_HANDLERS[k_type] - ignore, vtype_name, v_is_container = self._TTYPE_HANDLERS[v_type] - k_writer = getattr(self, ktype_name) - v_writer = getattr(self, vtype_name) - self.writeMapBegin(k_type, v_type, len(val)) - for m_key, m_val in val.iteritems(): - if not k_is_container: - k_writer(m_key) - else: - k_writer(m_key, spec[1]) - if not v_is_container: - v_writer(m_val) - else: - v_writer(m_val, spec[3]) - self.writeMapEnd() - - def writeStruct(self, obj, thrift_spec): - self.writeStructBegin(obj.__class__.__name__) - for field in thrift_spec: - if field is None: - continue - fname = field[2] - val = getattr(obj, fname) - if val is None: - # skip writing out unset fields - continue - fid = field[0] - ftype = field[1] - fspec = field[3] - # get the writer method for this value - self.writeFieldBegin(fname, ftype, fid) - self.writeFieldByTType(ftype, val, fspec) - self.writeFieldEnd() - self.writeFieldStop() - self.writeStructEnd() - - def writeFieldByTType(self, ttype, val, spec): - r_handler, w_handler, is_container = self._TTYPE_HANDLERS[ttype] - writer = getattr(self, w_handler) - if is_container: - writer(val, spec) - else: - writer(val) - -class TProtocolFactory: - def getProtocol(self, trans): - pass - diff --git a/module/lib/thrift/protocol/__init__.py b/module/lib/thrift/protocol/__init__.py deleted file mode 100644 index d53359b28..000000000 --- a/module/lib/thrift/protocol/__init__.py +++ /dev/null @@ -1,20 +0,0 @@ -# -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. 
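For reference, the compact protocol earlier in this diff leans on two encodings: zig-zag (so integers of small magnitude, negative or positive, stay small) and unsigned varints (7 payload bits per byte). A minimal sketch of both; the helper names are illustrative, not part of the Thrift API:

def to_zigzag(n, bits=32):
    # 0, -1, 1, -2, 2 ... map to 0, 1, 2, 3, 4 ...
    return (n << 1) ^ (n >> (bits - 1))

def from_zigzag(z):
    return (z >> 1) ^ -(z & 1)

def write_varint(out, n):
    # low 7 bits per byte; the high bit marks "more bytes follow"
    while n & ~0x7f:
        out.append((n & 0x7f) | 0x80)
        n >>= 7
    out.append(n)

buf = bytearray()
write_varint(buf, to_zigzag(-3))   # -3 -> zig-zag 5 -> single byte 0x05
assert from_zigzag(5) == -3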
See the License for the -# specific language governing permissions and limitations -# under the License. -# - -__all__ = ['TProtocol', 'TBinaryProtocol', 'fastbinary', 'TBase'] diff --git a/module/lib/thrift/server/THttpServer.py b/module/lib/thrift/server/THttpServer.py deleted file mode 100644 index 3047d9c00..000000000 --- a/module/lib/thrift/server/THttpServer.py +++ /dev/null @@ -1,82 +0,0 @@ -# -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. -# - -import BaseHTTPServer - -from thrift.server import TServer -from thrift.transport import TTransport - -class ResponseException(Exception): - """Allows handlers to override the HTTP response - - Normally, THttpServer always sends a 200 response. If a handler wants - to override this behavior (e.g., to simulate a misconfigured or - overloaded web server during testing), it can raise a ResponseException. - The function passed to the constructor will be called with the - RequestHandler as its only argument. - """ - def __init__(self, handler): - self.handler = handler - - -class THttpServer(TServer.TServer): - """A simple HTTP-based Thrift server - - This class is not very performant, but it is useful (for example) for - acting as a mock version of an Apache-based PHP Thrift endpoint.""" - - def __init__(self, processor, server_address, - inputProtocolFactory, outputProtocolFactory = None, - server_class = BaseHTTPServer.HTTPServer): - """Set up protocol factories and HTTP server. - - See BaseHTTPServer for server_address. - See TServer for protocol factories.""" - - if outputProtocolFactory is None: - outputProtocolFactory = inputProtocolFactory - - TServer.TServer.__init__(self, processor, None, None, None, - inputProtocolFactory, outputProtocolFactory) - - thttpserver = self - - class RequestHander(BaseHTTPServer.BaseHTTPRequestHandler): - def do_POST(self): - # Don't care about the request path. 
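# In plain BaseHTTPServer terms, the handler body below does roughly the
# following (a hedged sketch; process_call stands in for the Thrift
# processor round trip, it is not a real helper in this module):
#
#   body  = self.rfile.read(int(self.headers['Content-Length']))
#   reply = process_call(body)
#   self.send_response(200)
#   self.send_header('Content-Type', 'application/x-thrift')
#   self.end_headers()
#   self.wfile.write(reply)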
- itrans = TTransport.TFileObjectTransport(self.rfile) - otrans = TTransport.TFileObjectTransport(self.wfile) - itrans = TTransport.TBufferedTransport(itrans, int(self.headers['Content-Length'])) - otrans = TTransport.TMemoryBuffer() - iprot = thttpserver.inputProtocolFactory.getProtocol(itrans) - oprot = thttpserver.outputProtocolFactory.getProtocol(otrans) - try: - thttpserver.processor.process(iprot, oprot) - except ResponseException, exn: - exn.handler(self) - else: - self.send_response(200) - self.send_header("content-type", "application/x-thrift") - self.end_headers() - self.wfile.write(otrans.getvalue()) - - self.httpd = server_class(server_address, RequestHander) - - def serve(self): - self.httpd.serve_forever() diff --git a/module/lib/thrift/server/TNonblockingServer.py b/module/lib/thrift/server/TNonblockingServer.py deleted file mode 100644 index ea348a0b6..000000000 --- a/module/lib/thrift/server/TNonblockingServer.py +++ /dev/null @@ -1,310 +0,0 @@ -# -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. -# -"""Implementation of non-blocking server. - -The main idea of the server is reciving and sending requests -only from main thread. - -It also makes thread pool server in tasks terms, not connections. -""" -import threading -import socket -import Queue -import select -import struct -import logging - -from thrift.transport import TTransport -from thrift.protocol.TBinaryProtocol import TBinaryProtocolFactory - -__all__ = ['TNonblockingServer'] - -class Worker(threading.Thread): - """Worker is a small helper to process incoming connection.""" - def __init__(self, queue): - threading.Thread.__init__(self) - self.queue = queue - - def run(self): - """Process queries from task queue, stop if processor is None.""" - while True: - try: - processor, iprot, oprot, otrans, callback = self.queue.get() - if processor is None: - break - processor.process(iprot, oprot) - callback(True, otrans.getvalue()) - except Exception: - logging.exception("Exception while processing request") - callback(False, '') - -WAIT_LEN = 0 -WAIT_MESSAGE = 1 -WAIT_PROCESS = 2 -SEND_ANSWER = 3 -CLOSED = 4 - -def locked(func): - "Decorator which locks self.lock." - def nested(self, *args, **kwargs): - self.lock.acquire() - try: - return func(self, *args, **kwargs) - finally: - self.lock.release() - return nested - -def socket_exception(func): - "Decorator close object on socket.error." - def read(self, *args, **kwargs): - try: - return func(self, *args, **kwargs) - except socket.error: - self.close() - return read - -class Connection: - """Basic class is represented connection. - - It can be in state: - WAIT_LEN --- connection is reading request len. - WAIT_MESSAGE --- connection is reading request. 
- WAIT_PROCESS --- connection has just read the whole request and - is waiting for the ready routine to be called. - SEND_ANSWER --- connection is sending the answer string (including the length - of the answer). - CLOSED --- socket was closed and connection should be deleted. - """ - def __init__(self, new_socket, wake_up): - self.socket = new_socket - self.socket.setblocking(False) - self.status = WAIT_LEN - self.len = 0 - self.message = '' - self.lock = threading.Lock() - self.wake_up = wake_up - - def _read_len(self): - """Reads the length of the request. - - This is a really paranoid routine; it could be replaced by - self.socket.recv(4).""" - read = self.socket.recv(4 - len(self.message)) - if len(read) == 0: - # if we read 0 bytes and self.message is empty, the client closed - # the connection - if len(self.message) != 0: - logging.error("can't read frame size from socket") - self.close() - return - self.message += read - if len(self.message) == 4: - self.len, = struct.unpack('!i', self.message) - if self.len < 0: - logging.error("negative frame size, it seems the client"\ - " doesn't use FramedTransport") - self.close() - elif self.len == 0: - logging.error("empty frame, it's really strange") - self.close() - else: - self.message = '' - self.status = WAIT_MESSAGE - - @socket_exception - def read(self): - """Reads data from the stream and switches state.""" - assert self.status in (WAIT_LEN, WAIT_MESSAGE) - if self.status == WAIT_LEN: - self._read_len() - # go back to the main loop here for simplicity instead of - # falling through, even though there is a good chance that - # the message is already available - elif self.status == WAIT_MESSAGE: - read = self.socket.recv(self.len - len(self.message)) - if len(read) == 0: - logging.error("can't read frame from socket (got %d of %d bytes)" % - (len(self.message), self.len)) - self.close() - return - self.message += read - if len(self.message) == self.len: - self.status = WAIT_PROCESS - - @socket_exception - def write(self): - """Writes data to the socket and switches state.""" - assert self.status == SEND_ANSWER - sent = self.socket.send(self.message) - if sent == len(self.message): - self.status = WAIT_LEN - self.message = '' - self.len = 0 - else: - self.message = self.message[sent:] - - @locked - def ready(self, all_ok, message): - """Callback function for switching state and waking up the main thread. - - This is the only function which can be called asynchronously. - - ready() can switch the Connection to one of three states: - WAIT_LEN if the request was oneway. - SEND_ANSWER if the request was processed normally. - CLOSED if the request raised an unexpected exception. - - It also wakes up the main thread. - """ - assert self.status == WAIT_PROCESS - if not all_ok: - self.close() - self.wake_up() - return - self.len = 0 - if len(message) == 0: - # it was a oneway request, do not write answer - self.message = '' - self.status = WAIT_LEN - else: - self.message = struct.pack('!i', len(message)) + message - self.status = SEND_ANSWER - self.wake_up() - - @locked - def is_writeable(self): - "Returns True if connection should be added to the write list of select." - return self.status == SEND_ANSWER - - # it's not necessary, but... - @locked - def is_readable(self): - "Returns True if connection should be added to the read list of select." - return self.status in (WAIT_LEN, WAIT_MESSAGE) - - @locked - def is_closed(self): - "Returns True if connection is closed." - return self.status == CLOSED - - def fileno(self): - "Returns the file descriptor of the associated socket."
- return self.socket.fileno() - - def close(self): - "Closes connection" - self.status = CLOSED - self.socket.close() - -class TNonblockingServer: - """Non-blocking server.""" - def __init__(self, processor, lsocket, inputProtocolFactory=None, - outputProtocolFactory=None, threads=10): - self.processor = processor - self.socket = lsocket - self.in_protocol = inputProtocolFactory or TBinaryProtocolFactory() - self.out_protocol = outputProtocolFactory or self.in_protocol - self.threads = int(threads) - self.clients = {} - self.tasks = Queue.Queue() - self._read, self._write = socket.socketpair() - self.prepared = False - - def setNumThreads(self, num): - """Set the number of worker threads that should be created.""" - # implement ThreadPool interface - assert not self.prepared, "You can't change number of threads for working server" - self.threads = num - - def prepare(self): - """Prepares server for serve requests.""" - self.socket.listen() - for _ in xrange(self.threads): - thread = Worker(self.tasks) - thread.setDaemon(True) - thread.start() - self.prepared = True - - def wake_up(self): - """Wake up main thread. - - The server usualy waits in select call in we should terminate one. - The simplest way is using socketpair. - - Select always wait to read from the first socket of socketpair. - - In this case, we can just write anything to the second socket from - socketpair.""" - self._write.send('1') - - def _select(self): - """Does select on open connections.""" - readable = [self.socket.handle.fileno(), self._read.fileno()] - writable = [] - for i, connection in self.clients.items(): - if connection.is_readable(): - readable.append(connection.fileno()) - if connection.is_writeable(): - writable.append(connection.fileno()) - if connection.is_closed(): - del self.clients[i] - return select.select(readable, writable, readable) - - def handle(self): - """Handle requests. - - WARNING! You must call prepare BEFORE calling handle. - """ - assert self.prepared, "You have to call prepare before handle" - rset, wset, xset = self._select() - for readable in rset: - if readable == self._read.fileno(): - # don't care i just need to clean readable flag - self._read.recv(1024) - elif readable == self.socket.handle.fileno(): - client = self.socket.accept().handle - self.clients[client.fileno()] = Connection(client, self.wake_up) - else: - connection = self.clients[readable] - connection.read() - if connection.status == WAIT_PROCESS: - itransport = TTransport.TMemoryBuffer(connection.message) - otransport = TTransport.TMemoryBuffer() - iprot = self.in_protocol.getProtocol(itransport) - oprot = self.out_protocol.getProtocol(otransport) - self.tasks.put([self.processor, iprot, oprot, - otransport, connection.ready]) - for writeable in wset: - self.clients[writeable].write() - for oob in xset: - self.clients[oob].close() - del self.clients[oob] - - def close(self): - """Closes the server.""" - for _ in xrange(self.threads): - self.tasks.put([None, None, None, None, None]) - self.socket.close() - self.prepared = False - - def serve(self): - """Serve forever.""" - self.prepare() - while True: - self.handle() diff --git a/module/lib/thrift/server/TProcessPoolServer.py b/module/lib/thrift/server/TProcessPoolServer.py deleted file mode 100644 index 7ed814a88..000000000 --- a/module/lib/thrift/server/TProcessPoolServer.py +++ /dev/null @@ -1,125 +0,0 @@ -# -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. 
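The wake_up/_select pair above relies on the self-pipe trick: the read end of a socketpair sits in every select() call, so any worker thread can interrupt the blocking select by writing a single byte. A self-contained sketch of that mechanism:

import select, socket, threading, time

r, w = socket.socketpair()

def waker():
    time.sleep(0.1)
    w.send('1')        # any byte wakes the select loop, as wake_up() does

threading.Thread(target=waker).start()
rset, _, _ = select.select([r], [], [])   # blocks until the byte arrives
r.recv(1024)                              # drain it, as handle() does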
See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. -# - - -import logging -from multiprocessing import Process, Value, Condition, reduction - -from TServer import TServer -from thrift.transport.TTransport import TTransportException - -class TProcessPoolServer(TServer): - - """ - Server with a fixed size pool of worker subprocesses which service requests. - Note that if you need shared state between the handlers - it's up to you! - Written by Dvir Volk, doat.com - """ - - def __init__(self, * args): - TServer.__init__(self, *args) - self.numWorkers = 10 - self.workers = [] - self.isRunning = Value('b', False) - self.stopCondition = Condition() - self.postForkCallback = None - - def setPostForkCallback(self, callback): - if not callable(callback): - raise TypeError("This is not a callback!") - self.postForkCallback = callback - - def setNumWorkers(self, num): - """Set the number of worker threads that should be created""" - self.numWorkers = num - - def workerProcess(self): - """Loop around getting clients from the shared queue and process them.""" - - if self.postForkCallback: - self.postForkCallback() - - while self.isRunning.value == True: - try: - client = self.serverTransport.accept() - self.serveClient(client) - except (KeyboardInterrupt, SystemExit): - return 0 - except Exception, x: - logging.exception(x) - - def serveClient(self, client): - """Process input/output from a client for as long as possible""" - itrans = self.inputTransportFactory.getTransport(client) - otrans = self.outputTransportFactory.getTransport(client) - iprot = self.inputProtocolFactory.getProtocol(itrans) - oprot = self.outputProtocolFactory.getProtocol(otrans) - - try: - while True: - self.processor.process(iprot, oprot) - except TTransportException, tx: - pass - except Exception, x: - logging.exception(x) - - itrans.close() - otrans.close() - - - def serve(self): - """Start a fixed number of worker threads and put client into a queue""" - - #this is a shared state that can tell the workers to exit when set as false - self.isRunning.value = True - - #first bind and listen to the port - self.serverTransport.listen() - - #fork the children - for i in range(self.numWorkers): - try: - w = Process(target=self.workerProcess) - w.daemon = True - w.start() - self.workers.append(w) - except Exception, x: - logging.exception(x) - - #wait until the condition is set by stop() - - while True: - - self.stopCondition.acquire() - try: - self.stopCondition.wait() - break - except (SystemExit, KeyboardInterrupt): - break - except Exception, x: - logging.exception(x) - - self.isRunning.value = False - - def stop(self): - self.isRunning.value = False - self.stopCondition.acquire() - self.stopCondition.notify() - self.stopCondition.release() - diff --git a/module/lib/thrift/server/TServer.py b/module/lib/thrift/server/TServer.py deleted file mode 100644 index 8456e2d40..000000000 --- 
a/module/lib/thrift/server/TServer.py +++ /dev/null @@ -1,274 +0,0 @@ -# -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. -# - -import logging -import sys -import os -import traceback -import threading -import Queue - -from thrift.Thrift import TProcessor -from thrift.transport import TTransport -from thrift.protocol import TBinaryProtocol - -class TServer: - - """Base interface for a server, which must have a serve method.""" - - """ 3 constructors for all servers: - 1) (processor, serverTransport) - 2) (processor, serverTransport, transportFactory, protocolFactory) - 3) (processor, serverTransport, - inputTransportFactory, outputTransportFactory, - inputProtocolFactory, outputProtocolFactory)""" - def __init__(self, *args): - if (len(args) == 2): - self.__initArgs__(args[0], args[1], - TTransport.TTransportFactoryBase(), - TTransport.TTransportFactoryBase(), - TBinaryProtocol.TBinaryProtocolFactory(), - TBinaryProtocol.TBinaryProtocolFactory()) - elif (len(args) == 4): - self.__initArgs__(args[0], args[1], args[2], args[2], args[3], args[3]) - elif (len(args) == 6): - self.__initArgs__(args[0], args[1], args[2], args[3], args[4], args[5]) - - def __initArgs__(self, processor, serverTransport, - inputTransportFactory, outputTransportFactory, - inputProtocolFactory, outputProtocolFactory): - self.processor = processor - self.serverTransport = serverTransport - self.inputTransportFactory = inputTransportFactory - self.outputTransportFactory = outputTransportFactory - self.inputProtocolFactory = inputProtocolFactory - self.outputProtocolFactory = outputProtocolFactory - - def serve(self): - pass - -class TSimpleServer(TServer): - - """Simple single-threaded server that just pumps around one transport.""" - - def __init__(self, *args): - TServer.__init__(self, *args) - - def serve(self): - self.serverTransport.listen() - while True: - client = self.serverTransport.accept() - itrans = self.inputTransportFactory.getTransport(client) - otrans = self.outputTransportFactory.getTransport(client) - iprot = self.inputProtocolFactory.getProtocol(itrans) - oprot = self.outputProtocolFactory.getProtocol(otrans) - try: - while True: - self.processor.process(iprot, oprot) - except TTransport.TTransportException, tx: - pass - except Exception, x: - logging.exception(x) - - itrans.close() - otrans.close() - -class TThreadedServer(TServer): - - """Threaded server that spawns a new thread per each connection.""" - - def __init__(self, *args, **kwargs): - TServer.__init__(self, *args) - self.daemon = kwargs.get("daemon", False) - - def serve(self): - self.serverTransport.listen() - while True: - try: - client = self.serverTransport.accept() - t = threading.Thread(target = self.handle, args=(client,)) - t.setDaemon(self.daemon) - t.start() - except KeyboardInterrupt: - raise - 
except Exception, x: - logging.exception(x) - - def handle(self, client): - itrans = self.inputTransportFactory.getTransport(client) - otrans = self.outputTransportFactory.getTransport(client) - iprot = self.inputProtocolFactory.getProtocol(itrans) - oprot = self.outputProtocolFactory.getProtocol(otrans) - try: - while True: - self.processor.process(iprot, oprot) - except TTransport.TTransportException, tx: - pass - except Exception, x: - logging.exception(x) - - itrans.close() - otrans.close() - -class TThreadPoolServer(TServer): - - """Server with a fixed size pool of threads which service requests.""" - - def __init__(self, *args, **kwargs): - TServer.__init__(self, *args) - self.clients = Queue.Queue() - self.threads = 10 - self.daemon = kwargs.get("daemon", False) - - def setNumThreads(self, num): - """Set the number of worker threads that should be created""" - self.threads = num - - def serveThread(self): - """Loop around getting clients from the shared queue and process them.""" - while True: - try: - client = self.clients.get() - self.serveClient(client) - except Exception, x: - logging.exception(x) - - def serveClient(self, client): - """Process input/output from a client for as long as possible""" - itrans = self.inputTransportFactory.getTransport(client) - otrans = self.outputTransportFactory.getTransport(client) - iprot = self.inputProtocolFactory.getProtocol(itrans) - oprot = self.outputProtocolFactory.getProtocol(otrans) - try: - while True: - self.processor.process(iprot, oprot) - except TTransport.TTransportException, tx: - pass - except Exception, x: - logging.exception(x) - - itrans.close() - otrans.close() - - def serve(self): - """Start a fixed number of worker threads and put client into a queue""" - for i in range(self.threads): - try: - t = threading.Thread(target = self.serveThread) - t.setDaemon(self.daemon) - t.start() - except Exception, x: - logging.exception(x) - - # Pump the socket for clients - self.serverTransport.listen() - while True: - try: - client = self.serverTransport.accept() - self.clients.put(client) - except Exception, x: - logging.exception(x) - - -class TForkingServer(TServer): - - """A Thrift server that forks a new process for each request""" - """ - This is more scalable than the threaded server as it does not cause - GIL contention. - - Note that this has different semantics from the threading server. - Specifically, updates to shared variables will no longer be shared. - It will also not work on windows. - - This code is heavily inspired by SocketServer.ForkingMixIn in the - Python stdlib. 
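The fork-per-connection idea described above, boiled down: the parent closes its copy of the accepted socket and reaps children without blocking, while the child must leave via os._exit() so it never returns into the parent's accept loop. A minimal sketch, with an echo standing in for processor.process():

import os

def serve_forever(listener):            # listener: a bound, listening socket
    while True:
        conn, _ = listener.accept()
        if os.fork():                   # parent
            conn.close()                # the child owns the connection now
            try:
                os.waitpid(0, os.WNOHANG)   # reap any finished children
            except OSError:
                pass
        else:                           # child
            try:
                conn.sendall(conn.recv(4096))
            finally:
                conn.close()
                os._exit(0)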
- """ - - def __init__(self, *args): - TServer.__init__(self, *args) - self.children = [] - - def serve(self): - def try_close(file): - try: - file.close() - except IOError, e: - logging.warning(e, exc_info=True) - - - self.serverTransport.listen() - while True: - client = self.serverTransport.accept() - try: - pid = os.fork() - - if pid: # parent - # add before collect, otherwise you race w/ waitpid - self.children.append(pid) - self.collect_children() - - # Parent must close socket or the connection may not get - # closed promptly - itrans = self.inputTransportFactory.getTransport(client) - otrans = self.outputTransportFactory.getTransport(client) - try_close(itrans) - try_close(otrans) - else: - itrans = self.inputTransportFactory.getTransport(client) - otrans = self.outputTransportFactory.getTransport(client) - - iprot = self.inputProtocolFactory.getProtocol(itrans) - oprot = self.outputProtocolFactory.getProtocol(otrans) - - ecode = 0 - try: - try: - while True: - self.processor.process(iprot, oprot) - except TTransport.TTransportException, tx: - pass - except Exception, e: - logging.exception(e) - ecode = 1 - finally: - try_close(itrans) - try_close(otrans) - - os._exit(ecode) - - except TTransport.TTransportException, tx: - pass - except Exception, x: - logging.exception(x) - - - def collect_children(self): - while self.children: - try: - pid, status = os.waitpid(0, os.WNOHANG) - except os.error: - pid = None - - if pid: - self.children.remove(pid) - else: - break - - diff --git a/module/lib/thrift/server/__init__.py b/module/lib/thrift/server/__init__.py deleted file mode 100644 index 1bf6e254e..000000000 --- a/module/lib/thrift/server/__init__.py +++ /dev/null @@ -1,20 +0,0 @@ -# -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. -# - -__all__ = ['TServer', 'TNonblockingServer'] diff --git a/module/lib/thrift/transport/THttpClient.py b/module/lib/thrift/transport/THttpClient.py deleted file mode 100644 index 50269785c..000000000 --- a/module/lib/thrift/transport/THttpClient.py +++ /dev/null @@ -1,126 +0,0 @@ -# -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. 
See the License for the -# specific language governing permissions and limitations -# under the License. -# - -from TTransport import * -from cStringIO import StringIO - -import urlparse -import httplib -import warnings -import socket - -class THttpClient(TTransportBase): - - """Http implementation of TTransport base.""" - - def __init__(self, uri_or_host, port=None, path=None): - """THttpClient supports two different types constructor parameters. - - THttpClient(host, port, path) - deprecated - THttpClient(uri) - - Only the second supports https.""" - - if port is not None: - warnings.warn("Please use the THttpClient('http://host:port/path') syntax", DeprecationWarning, stacklevel=2) - self.host = uri_or_host - self.port = port - assert path - self.path = path - self.scheme = 'http' - else: - parsed = urlparse.urlparse(uri_or_host) - self.scheme = parsed.scheme - assert self.scheme in ('http', 'https') - if self.scheme == 'http': - self.port = parsed.port or httplib.HTTP_PORT - elif self.scheme == 'https': - self.port = parsed.port or httplib.HTTPS_PORT - self.host = parsed.hostname - self.path = parsed.path - if parsed.query: - self.path += '?%s' % parsed.query - self.__wbuf = StringIO() - self.__http = None - self.__timeout = None - - def open(self): - if self.scheme == 'http': - self.__http = httplib.HTTP(self.host, self.port) - else: - self.__http = httplib.HTTPS(self.host, self.port) - - def close(self): - self.__http.close() - self.__http = None - - def isOpen(self): - return self.__http != None - - def setTimeout(self, ms): - if not hasattr(socket, 'getdefaulttimeout'): - raise NotImplementedError - - if ms is None: - self.__timeout = None - else: - self.__timeout = ms/1000.0 - - def read(self, sz): - return self.__http.file.read(sz) - - def write(self, buf): - self.__wbuf.write(buf) - - def __withTimeout(f): - def _f(*args, **kwargs): - orig_timeout = socket.getdefaulttimeout() - socket.setdefaulttimeout(args[0].__timeout) - result = f(*args, **kwargs) - socket.setdefaulttimeout(orig_timeout) - return result - return _f - - def flush(self): - if self.isOpen(): - self.close() - self.open(); - - # Pull data out of buffer - data = self.__wbuf.getvalue() - self.__wbuf = StringIO() - - # HTTP request - self.__http.putrequest('POST', self.path) - - # Write headers - self.__http.putheader('Host', self.host) - self.__http.putheader('Content-Type', 'application/x-thrift') - self.__http.putheader('Content-Length', str(len(data))) - self.__http.endheaders() - - # Write payload - self.__http.send(data) - - # Get reply to flush the request - self.code, self.message, self.headers = self.__http.getreply() - - # Decorate if we know how to timeout - if hasattr(socket, 'getdefaulttimeout'): - flush = __withTimeout(flush) diff --git a/module/lib/thrift/transport/TSocket.py b/module/lib/thrift/transport/TSocket.py deleted file mode 100644 index 4e0e1874f..000000000 --- a/module/lib/thrift/transport/TSocket.py +++ /dev/null @@ -1,163 +0,0 @@ -# -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. 
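Each flush() above amounts to one complete HTTP POST: the buffered request bytes become the body, and the reply becomes the source for read(). The same exchange with httplib directly (host, port, and path here are illustrative):

import httplib

payload = 'serialized-thrift-request'      # stand-in for the buffered message
conn = httplib.HTTP('localhost', 9090)
conn.putrequest('POST', '/thrift')
conn.putheader('Host', 'localhost')
conn.putheader('Content-Type', 'application/x-thrift')
conn.putheader('Content-Length', str(len(payload)))
conn.endheaders()
conn.send(payload)
code, message, headers = conn.getreply()   # reply body then via conn.getfile()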
You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. -# - -from TTransport import * -import os -import errno -import socket -import sys - -class TSocketBase(TTransportBase): - def _resolveAddr(self): - if self._unix_socket is not None: - return [(socket.AF_UNIX, socket.SOCK_STREAM, None, None, self._unix_socket)] - else: - return socket.getaddrinfo(self.host, self.port, socket.AF_UNSPEC, socket.SOCK_STREAM, 0, socket.AI_PASSIVE | socket.AI_ADDRCONFIG) - - def close(self): - if self.handle: - self.handle.close() - self.handle = None - -class TSocket(TSocketBase): - """Socket implementation of TTransport base.""" - - def __init__(self, host='localhost', port=9090, unix_socket=None): - """Initialize a TSocket - - @param host(str) The host to connect to. - @param port(int) The (TCP) port to connect to. - @param unix_socket(str) The filename of a unix socket to connect to. - (host and port will be ignored.) - """ - - self.host = host - self.port = port - self.handle = None - self._unix_socket = unix_socket - self._timeout = None - - def setHandle(self, h): - self.handle = h - - def isOpen(self): - return self.handle is not None - - def setTimeout(self, ms): - if ms is None: - self._timeout = None - else: - self._timeout = ms/1000.0 - - if self.handle is not None: - self.handle.settimeout(self._timeout) - - def open(self): - try: - res0 = self._resolveAddr() - for res in res0: - self.handle = socket.socket(res[0], res[1]) - self.handle.settimeout(self._timeout) - try: - self.handle.connect(res[4]) - except socket.error, e: - if res is not res0[-1]: - continue - else: - raise e - break - except socket.error, e: - if self._unix_socket: - message = 'Could not connect to socket %s' % self._unix_socket - else: - message = 'Could not connect to %s:%d' % (self.host, self.port) - raise TTransportException(type=TTransportException.NOT_OPEN, message=message) - - def read(self, sz): - try: - buff = self.handle.recv(sz) - except socket.error, e: - if (e.args[0] == errno.ECONNRESET and - (sys.platform == 'darwin' or sys.platform.startswith('freebsd'))): - # freebsd and Mach don't follow POSIX semantic of recv - # and fail with ECONNRESET if peer performed shutdown. - # See corresponding comment and code in TSocket::read() - # in lib/cpp/src/transport/TSocket.cpp. - self.close() - # Trigger the check to raise the END_OF_FILE exception below. 
- buff = '' - else: - raise - if len(buff) == 0: - raise TTransportException(type=TTransportException.END_OF_FILE, message='TSocket read 0 bytes') - return buff - - def write(self, buff): - if not self.handle: - raise TTransportException(type=TTransportException.NOT_OPEN, message='Transport not open') - sent = 0 - have = len(buff) - while sent < have: - plus = self.handle.send(buff) - if plus == 0: - raise TTransportException(type=TTransportException.END_OF_FILE, message='TSocket sent 0 bytes') - sent += plus - buff = buff[plus:] - - def flush(self): - pass - -class TServerSocket(TSocketBase, TServerTransportBase): - """Socket implementation of TServerTransport base.""" - - def __init__(self, host=None, port=9090, unix_socket=None): - self.host = host - self.port = port - self._unix_socket = unix_socket - self.handle = None - - def listen(self): - res0 = self._resolveAddr() - for res in res0: - if res[0] is socket.AF_INET6 or res is res0[-1]: - break - - # We need remove the old unix socket if the file exists and - # nobody is listening on it. - if self._unix_socket: - tmp = socket.socket(res[0], res[1]) - try: - tmp.connect(res[4]) - except socket.error, err: - eno, message = err.args - if eno == errno.ECONNREFUSED: - os.unlink(res[4]) - - self.handle = socket.socket(res[0], res[1]) - self.handle.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) - if hasattr(self.handle, 'settimeout'): - self.handle.settimeout(None) - self.handle.bind(res[4]) - self.handle.listen(128) - - def accept(self): - client, addr = self.handle.accept() - result = TSocket() - result.setHandle(client) - return result diff --git a/module/lib/thrift/transport/TTransport.py b/module/lib/thrift/transport/TTransport.py deleted file mode 100644 index 12e51a9bf..000000000 --- a/module/lib/thrift/transport/TTransport.py +++ /dev/null @@ -1,331 +0,0 @@ -# -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. -# - -from cStringIO import StringIO -from struct import pack,unpack -from thrift.Thrift import TException - -class TTransportException(TException): - - """Custom Transport Exception class""" - - UNKNOWN = 0 - NOT_OPEN = 1 - ALREADY_OPEN = 2 - TIMED_OUT = 3 - END_OF_FILE = 4 - - def __init__(self, type=UNKNOWN, message=None): - TException.__init__(self, message) - self.type = type - -class TTransportBase: - - """Base class for Thrift transport layer.""" - - def isOpen(self): - pass - - def open(self): - pass - - def close(self): - pass - - def read(self, sz): - pass - - def readAll(self, sz): - buff = '' - have = 0 - while (have < sz): - chunk = self.read(sz-have) - have += len(chunk) - buff += chunk - - if len(chunk) == 0: - raise EOFError() - - return buff - - def write(self, buf): - pass - - def flush(self): - pass - -# This class should be thought of as an interface. 
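readAll() above exists because a transport's read() may legally return fewer bytes than requested; callers that need an exact count must loop. The same idea as a standalone helper (joining chunks instead of repeated string concatenation):

def read_all(read, sz):
    # read: a callable such as sock.recv or fileobj.read
    chunks, have = [], 0
    while have < sz:
        chunk = read(sz - have)
        if not chunk:
            raise EOFError('stream ended with %d of %d bytes read' % (have, sz))
        chunks.append(chunk)
        have += len(chunk)
    return ''.join(chunks)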
-class CReadableTransport: - """base class for transports that are readable from C""" - - # TODO(dreiss): Think about changing this interface to allow us to use - # a (Python, not c) StringIO instead, because it allows - # you to write after reading. - - # NOTE: This is a classic class, so properties will NOT work - # correctly for setting. - @property - def cstringio_buf(self): - """A cStringIO buffer that contains the current chunk we are reading.""" - pass - - def cstringio_refill(self, partialread, reqlen): - """Refills cstringio_buf. - - Returns the currently used buffer (which can but need not be the same as - the old cstringio_buf). partialread is what the C code has read from the - buffer, and should be inserted into the buffer before any more reads. The - return value must be a new, not borrowed reference. Something along the - lines of self._buf should be fine. - - If reqlen bytes can't be read, throw EOFError. - """ - pass - -class TServerTransportBase: - - """Base class for Thrift server transports.""" - - def listen(self): - pass - - def accept(self): - pass - - def close(self): - pass - -class TTransportFactoryBase: - - """Base class for a Transport Factory""" - - def getTransport(self, trans): - return trans - -class TBufferedTransportFactory: - - """Factory transport that builds buffered transports""" - - def getTransport(self, trans): - buffered = TBufferedTransport(trans) - return buffered - - -class TBufferedTransport(TTransportBase,CReadableTransport): - - """Class that wraps another transport and buffers its I/O. - - The implementation uses a (configurable) fixed-size read buffer - but buffers all writes until a flush is performed. - """ - - DEFAULT_BUFFER = 4096 - - def __init__(self, trans, rbuf_size = DEFAULT_BUFFER): - self.__trans = trans - self.__wbuf = StringIO() - self.__rbuf = StringIO("") - self.__rbuf_size = rbuf_size - - def isOpen(self): - return self.__trans.isOpen() - - def open(self): - return self.__trans.open() - - def close(self): - return self.__trans.close() - - def read(self, sz): - ret = self.__rbuf.read(sz) - if len(ret) != 0: - return ret - - self.__rbuf = StringIO(self.__trans.read(max(sz, self.__rbuf_size))) - return self.__rbuf.read(sz) - - def write(self, buf): - self.__wbuf.write(buf) - - def flush(self): - out = self.__wbuf.getvalue() - # reset wbuf before write/flush to preserve state on underlying failure - self.__wbuf = StringIO() - self.__trans.write(out) - self.__trans.flush() - - # Implement the CReadableTransport interface. - @property - def cstringio_buf(self): - return self.__rbuf - - def cstringio_refill(self, partialread, reqlen): - retstring = partialread - if reqlen < self.__rbuf_size: - # try to make a read of as much as we can. - retstring += self.__trans.read(self.__rbuf_size) - - # but make sure we do read reqlen bytes. - if len(retstring) < reqlen: - retstring += self.__trans.readAll(reqlen - len(retstring)) - - self.__rbuf = StringIO(retstring) - return self.__rbuf - -class TMemoryBuffer(TTransportBase, CReadableTransport): - """Wraps a cStringIO object as a TTransport. - - NOTE: Unlike the C++ version of this class, you cannot write to it - then immediately read from it. If you want to read from a - TMemoryBuffer, you must either pass a string to the constructor. - TODO(dreiss): Make this work like the C++ version. 
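TBufferedTransport above accumulates every write() in a StringIO and hands the whole buffer to the wrapped transport in a single write at flush() time. A toy transport makes the batching visible (CountingTransport is an illustration, not part of the library):

from cStringIO import StringIO

class CountingTransport(object):
    def __init__(self):
        self.writes, self.data = 0, ''
    def write(self, buf):
        self.writes += 1
        self.data += buf

under = CountingTransport()
wbuf = StringIO()
for piece in ('head', '-', 'body'):
    wbuf.write(piece)              # no I/O yet, just buffering
under.write(wbuf.getvalue())       # flush(): one downstream write
assert under.writes == 1 and under.data == 'head-body'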
- """ - - def __init__(self, value=None): - """value -- a value to read from for stringio - - If value is set, this will be a transport for reading, - otherwise, it is for writing""" - if value is not None: - self._buffer = StringIO(value) - else: - self._buffer = StringIO() - - def isOpen(self): - return not self._buffer.closed - - def open(self): - pass - - def close(self): - self._buffer.close() - - def read(self, sz): - return self._buffer.read(sz) - - def write(self, buf): - self._buffer.write(buf) - - def flush(self): - pass - - def getvalue(self): - return self._buffer.getvalue() - - # Implement the CReadableTransport interface. - @property - def cstringio_buf(self): - return self._buffer - - def cstringio_refill(self, partialread, reqlen): - # only one shot at reading... - raise EOFError() - -class TFramedTransportFactory: - - """Factory transport that builds framed transports""" - - def getTransport(self, trans): - framed = TFramedTransport(trans) - return framed - - -class TFramedTransport(TTransportBase, CReadableTransport): - - """Class that wraps another transport and frames its I/O when writing.""" - - def __init__(self, trans,): - self.__trans = trans - self.__rbuf = StringIO() - self.__wbuf = StringIO() - - def isOpen(self): - return self.__trans.isOpen() - - def open(self): - return self.__trans.open() - - def close(self): - return self.__trans.close() - - def read(self, sz): - ret = self.__rbuf.read(sz) - if len(ret) != 0: - return ret - - self.readFrame() - return self.__rbuf.read(sz) - - def readFrame(self): - buff = self.__trans.readAll(4) - sz, = unpack('!i', buff) - self.__rbuf = StringIO(self.__trans.readAll(sz)) - - def write(self, buf): - self.__wbuf.write(buf) - - def flush(self): - wout = self.__wbuf.getvalue() - wsz = len(wout) - # reset wbuf before write/flush to preserve state on underlying failure - self.__wbuf = StringIO() - # N.B.: Doing this string concatenation is WAY cheaper than making - # two separate calls to the underlying socket object. Socket writes in - # Python turn out to be REALLY expensive, but it seems to do a pretty - # good job of managing string buffer operations without excessive copies - buf = pack("!i", wsz) + wout - self.__trans.write(buf) - self.__trans.flush() - - # Implement the CReadableTransport interface. - @property - def cstringio_buf(self): - return self.__rbuf - - def cstringio_refill(self, prefix, reqlen): - # self.__rbuf will already be empty here because fastbinary doesn't - # ask for a refill until the previous buffer is empty. Therefore, - # we can start reading new frames immediately. - while len(prefix) < reqlen: - self.readFrame() - prefix += self.__rbuf.getvalue() - self.__rbuf = StringIO(prefix) - return self.__rbuf - - -class TFileObjectTransport(TTransportBase): - """Wraps a file-like object to make it work as a Thrift transport.""" - - def __init__(self, fileobj): - self.fileobj = fileobj - - def isOpen(self): - return True - - def close(self): - self.fileobj.close() - - def read(self, sz): - return self.fileobj.read(sz) - - def write(self, buf): - self.fileobj.write(buf) - - def flush(self): - self.fileobj.flush() diff --git a/module/lib/thrift/transport/TTwisted.py b/module/lib/thrift/transport/TTwisted.py deleted file mode 100644 index b6dcb4e0b..000000000 --- a/module/lib/thrift/transport/TTwisted.py +++ /dev/null @@ -1,219 +0,0 @@ -# -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. 
See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. -# -from zope.interface import implements, Interface, Attribute -from twisted.internet.protocol import Protocol, ServerFactory, ClientFactory, \ - connectionDone -from twisted.internet import defer -from twisted.protocols import basic -from twisted.python import log -from twisted.web import server, resource, http - -from thrift.transport import TTransport -from cStringIO import StringIO - - -class TMessageSenderTransport(TTransport.TTransportBase): - - def __init__(self): - self.__wbuf = StringIO() - - def write(self, buf): - self.__wbuf.write(buf) - - def flush(self): - msg = self.__wbuf.getvalue() - self.__wbuf = StringIO() - self.sendMessage(msg) - - def sendMessage(self, message): - raise NotImplementedError - - -class TCallbackTransport(TMessageSenderTransport): - - def __init__(self, func): - TMessageSenderTransport.__init__(self) - self.func = func - - def sendMessage(self, message): - self.func(message) - - -class ThriftClientProtocol(basic.Int32StringReceiver): - - MAX_LENGTH = 2 ** 31 - 1 - - def __init__(self, client_class, iprot_factory, oprot_factory=None): - self._client_class = client_class - self._iprot_factory = iprot_factory - if oprot_factory is None: - self._oprot_factory = iprot_factory - else: - self._oprot_factory = oprot_factory - - self.recv_map = {} - self.started = defer.Deferred() - - def dispatch(self, msg): - self.sendString(msg) - - def connectionMade(self): - tmo = TCallbackTransport(self.dispatch) - self.client = self._client_class(tmo, self._oprot_factory) - self.started.callback(self.client) - - def connectionLost(self, reason=connectionDone): - for k,v in self.client._reqs.iteritems(): - tex = TTransport.TTransportException( - type=TTransport.TTransportException.END_OF_FILE, - message='Connection closed') - v.errback(tex) - - def stringReceived(self, frame): - tr = TTransport.TMemoryBuffer(frame) - iprot = self._iprot_factory.getProtocol(tr) - (fname, mtype, rseqid) = iprot.readMessageBegin() - - try: - method = self.recv_map[fname] - except KeyError: - method = getattr(self.client, 'recv_' + fname) - self.recv_map[fname] = method - - method(iprot, mtype, rseqid) - - -class ThriftServerProtocol(basic.Int32StringReceiver): - - MAX_LENGTH = 2 ** 31 - 1 - - def dispatch(self, msg): - self.sendString(msg) - - def processError(self, error): - self.transport.loseConnection() - - def processOk(self, _, tmo): - msg = tmo.getvalue() - - if len(msg) > 0: - self.dispatch(msg) - - def stringReceived(self, frame): - tmi = TTransport.TMemoryBuffer(frame) - tmo = TTransport.TMemoryBuffer() - - iprot = self.factory.iprot_factory.getProtocol(tmi) - oprot = self.factory.oprot_factory.getProtocol(tmo) - - d = self.factory.processor.process(iprot, oprot) - d.addCallbacks(self.processOk, self.processError, - callbackArgs=(tmo,)) - - -class IThriftServerFactory(Interface): - - processor = 
Attribute("Thrift processor") - - iprot_factory = Attribute("Input protocol factory") - - oprot_factory = Attribute("Output protocol factory") - - -class IThriftClientFactory(Interface): - - client_class = Attribute("Thrift client class") - - iprot_factory = Attribute("Input protocol factory") - - oprot_factory = Attribute("Output protocol factory") - - -class ThriftServerFactory(ServerFactory): - - implements(IThriftServerFactory) - - protocol = ThriftServerProtocol - - def __init__(self, processor, iprot_factory, oprot_factory=None): - self.processor = processor - self.iprot_factory = iprot_factory - if oprot_factory is None: - self.oprot_factory = iprot_factory - else: - self.oprot_factory = oprot_factory - - -class ThriftClientFactory(ClientFactory): - - implements(IThriftClientFactory) - - protocol = ThriftClientProtocol - - def __init__(self, client_class, iprot_factory, oprot_factory=None): - self.client_class = client_class - self.iprot_factory = iprot_factory - if oprot_factory is None: - self.oprot_factory = iprot_factory - else: - self.oprot_factory = oprot_factory - - def buildProtocol(self, addr): - p = self.protocol(self.client_class, self.iprot_factory, - self.oprot_factory) - p.factory = self - return p - - -class ThriftResource(resource.Resource): - - allowedMethods = ('POST',) - - def __init__(self, processor, inputProtocolFactory, - outputProtocolFactory=None): - resource.Resource.__init__(self) - self.inputProtocolFactory = inputProtocolFactory - if outputProtocolFactory is None: - self.outputProtocolFactory = inputProtocolFactory - else: - self.outputProtocolFactory = outputProtocolFactory - self.processor = processor - - def getChild(self, path, request): - return self - - def _cbProcess(self, _, request, tmo): - msg = tmo.getvalue() - request.setResponseCode(http.OK) - request.setHeader("content-type", "application/x-thrift") - request.write(msg) - request.finish() - - def render_POST(self, request): - request.content.seek(0, 0) - data = request.content.read() - tmi = TTransport.TMemoryBuffer(data) - tmo = TTransport.TMemoryBuffer() - - iprot = self.inputProtocolFactory.getProtocol(tmi) - oprot = self.outputProtocolFactory.getProtocol(tmo) - - d = self.processor.process(iprot, oprot) - d.addCallback(self._cbProcess, request, tmo) - return server.NOT_DONE_YET diff --git a/module/lib/thrift/transport/TZlibTransport.py b/module/lib/thrift/transport/TZlibTransport.py deleted file mode 100644 index 784d4e1e0..000000000 --- a/module/lib/thrift/transport/TZlibTransport.py +++ /dev/null @@ -1,261 +0,0 @@ -# -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. -# -''' -TZlibTransport provides a compressed transport and transport factory -class, using the python standard library zlib module to implement -data compression. 
-'''
-
-from __future__ import division
-import zlib
-from cStringIO import StringIO
-from TTransport import TTransportBase, CReadableTransport
-
-
-class TZlibTransportFactory(object):
-    '''
-    Factory transport that builds zlib-compressed transports.
-
-    This factory caches the last transport it was passed and returns
-    the same TZlibTransport object that was created for it.
-
-    This caching means the TServer class will get the _same_ transport
-    object for both input and output transports from this factory.
-    (For non-threaded scenarios only, since the cache holds one object.)
-
-    The purpose of this caching is to allocate only one TZlibTransport
-    where only one is really needed (since it must have separate
-    read/write buffers), and it makes the statistics from
-    getCompSavings() and getCompRatio() easier to understand.
-    '''
-
-    # class-scoped cache of the last transport given and the
-    # TZlibTransport returned for it
-    _last_trans = None
-    _last_z = None
-
-    def getTransport(self, trans, compresslevel=9):
-        '''Wrap a transport, trans, in the TZlibTransport compressed
-        transport class, returning a new transport to the caller.
-
-        @param compresslevel: The zlib compression level, ranging
-        from 0 (no compression) to 9 (best compression).  Defaults to 9.
-        @type compresslevel: int
-
-        This method returns a TZlibTransport which wraps the
-        passed C{trans} TTransport derived instance.
-        '''
-        if trans == self._last_trans:
-            return self._last_z
-        ztrans = TZlibTransport(trans, compresslevel)
-        self._last_trans = trans
-        self._last_z = ztrans
-        return ztrans
-
-
-class TZlibTransport(TTransportBase, CReadableTransport):
-    '''
-    Class that wraps a transport with zlib, compressing writes and
-    decompressing reads, using the Python standard library zlib module.
-    '''
-
-    # Read buffer size for the python fastbinary C extension,
-    # the TBinaryProtocolAccelerated class.
-    DEFAULT_BUFFSIZE = 4096
-
-    def __init__(self, trans, compresslevel=9):
-        '''
-        Create a new TZlibTransport, wrapping C{trans}, another
-        TTransport derived object.
-
-        @param trans: A thrift transport object, e.g. a TSocket() object.
-        @type trans: TTransport
-        @param compresslevel: The zlib compression level, ranging
-        from 0 (no compression) to 9 (best compression).  Defaults to 9.
-        @type compresslevel: int
-        '''
-        self.__trans = trans
-        self.compresslevel = compresslevel
-        self.__rbuf = StringIO()
-        self.__wbuf = StringIO()
-        self._init_zlib()
-        self._init_stats()
-
-    def _reinit_buffers(self):
-        '''
-        Internal method to initialize/reset the internal StringIO objects
-        for the read and write buffers.
-        '''
-        self.__rbuf = StringIO()
-        self.__wbuf = StringIO()
-
-    def _init_stats(self):
-        '''
-        Internal method to reset the internal statistics counters
-        for compression ratios and bandwidth savings.
-        '''
-        self.bytes_in = 0
-        self.bytes_out = 0
-        self.bytes_in_comp = 0
-        self.bytes_out_comp = 0
-
-    def _init_zlib(self):
-        '''
-        Internal method for setting up the zlib compression and
-        decompression objects.
-        '''
-        self._zcomp_read = zlib.decompressobj()
-        self._zcomp_write = zlib.compressobj(self.compresslevel)
-
-    def getCompRatio(self):
-        '''
-        Get the current measured compression ratios (in, out) for
-        this transport.
-
-        Returns a tuple of:
-        (inbound_compression_ratio, outbound_compression_ratio)
-
-        The compression ratios are computed as:
-            compressed / uncompressed
-
-        E.g., data that compresses by 10x will have a ratio of 0.10,
-        and data that compresses to half of its original size will
-        have a ratio of 0.5.
-
-        None is returned if no bytes have yet been processed in
-        a particular direction.
-        '''
-        r_percent, w_percent = (None, None)
-        if self.bytes_in > 0:
-            r_percent = self.bytes_in_comp / self.bytes_in
-        if self.bytes_out > 0:
-            w_percent = self.bytes_out_comp / self.bytes_out
-        return (r_percent, w_percent)
-
-    def getCompSavings(self):
-        '''
-        Get the current count of bytes saved by data compression.
-
-        Returns a tuple of:
-        (inbound_saved_bytes, outbound_saved_bytes)
-
-        Note: if compression is actually expanding your data
-        (only likely with very tiny thrift objects), then the
-        values returned will be negative.
-        '''
-        r_saved = self.bytes_in - self.bytes_in_comp
-        w_saved = self.bytes_out - self.bytes_out_comp
-        return (r_saved, w_saved)
-
-    def isOpen(self):
-        '''Return the underlying transport's open status.'''
-        return self.__trans.isOpen()
-
-    def open(self):
-        '''Open the underlying transport.'''
-        self._init_stats()
-        return self.__trans.open()
-
-    def listen(self):
-        '''Invoke the underlying transport's listen() method.'''
-        self.__trans.listen()
-
-    def accept(self):
-        '''Accept connections on the underlying transport.'''
-        return self.__trans.accept()
-
-    def close(self):
-        '''Close the underlying transport.'''
-        self._reinit_buffers()
-        self._init_zlib()
-        return self.__trans.close()
-
-    def read(self, sz):
-        '''
-        Read up to sz bytes from the decompressed-bytes buffer, and
-        read from the underlying transport if the decompression
-        buffer is empty.
-        '''
-        ret = self.__rbuf.read(sz)
-        if len(ret) > 0:
-            return ret
-        # keep reading from the transport until something comes back
-        while True:
-            if self.readComp(sz):
-                break
-        ret = self.__rbuf.read(sz)
-        return ret
-
-    def readComp(self, sz):
-        '''
-        Read compressed data from the underlying transport, then
-        decompress it and append it to the internal StringIO read buffer.
-        '''
-        zbuf = self.__trans.read(sz)
-        zbuf = self._zcomp_read.unconsumed_tail + zbuf
-        buf = self._zcomp_read.decompress(zbuf)
-        # Count uncompressed bytes in bytes_in and compressed bytes in
-        # bytes_in_comp, mirroring the write-side bookkeeping so the
-        # compressed/uncompressed ratio documented in getCompRatio() holds.
-        self.bytes_in += len(buf)
-        self.bytes_in_comp += len(zbuf)
-        old = self.__rbuf.read()
-        self.__rbuf = StringIO(old + buf)
-        if len(old) + len(buf) == 0:
-            return False
-        return True
-
-    def write(self, buf):
-        '''
-        Write some bytes, putting them into the internal write
-        buffer for eventual compression.
-        '''
-        self.__wbuf.write(buf)
-
-    def flush(self):
-        '''
-        Flush any queued-up data in the write buffer, and ensure the
-        compression buffer is flushed out to the underlying transport.
-        '''
-        wout = self.__wbuf.getvalue()
-        if len(wout) > 0:
-            zbuf = self._zcomp_write.compress(wout)
-            self.bytes_out += len(wout)
-            self.bytes_out_comp += len(zbuf)
-        else:
-            zbuf = ''
-        ztail = self._zcomp_write.flush(zlib.Z_SYNC_FLUSH)
-        self.bytes_out_comp += len(ztail)
-        if (len(zbuf) + len(ztail)) > 0:
-            self.__wbuf = StringIO()
-            self.__trans.write(zbuf + ztail)
-        self.__trans.flush()
-
-    @property
-    def cstringio_buf(self):
-        '''Implement the CReadableTransport interface.'''
-        return self.__rbuf
-
-    def cstringio_refill(self, partialread, reqlen):
-        '''Implement the CReadableTransport interface for refill.'''
-        retstring = partialread
-        if reqlen < self.DEFAULT_BUFFSIZE:
-            retstring += self.read(self.DEFAULT_BUFFSIZE)
-        while len(retstring) < reqlen:
-            retstring += self.read(reqlen - len(retstring))
-        self.__rbuf = StringIO(retstring)
-        return self.__rbuf
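A hedged usage sketch for the transport above; TMemoryBuffer stands in for
a real socket, and the figures in the comments are approximate, since they
depend on zlib's output for this particular input:

    from thrift.transport.TTransport import TMemoryBuffer
    from thrift.transport.TZlibTransport import TZlibTransport

    sink = TMemoryBuffer()
    ztrans = TZlibTransport(sink, compresslevel=9)
    ztrans.write('hello ' * 100)      # 600 highly repetitive bytes
    ztrans.flush()                    # compress and push to the wrapped transport

    print ztrans.getCompRatio()       # (None, ~0.05): nothing read yet, writes shrank ~20x
    print ztrans.getCompSavings()     # roughly (0, 570) bytes saved outbound
    print len(sink.getvalue())        # the compressed bytes actually written

diff --git a/module/lib/thrift/transport/__init__.py b/module/lib/thrift/transport/__init__.py deleted file mode 100644 index 46e54fe6b..000000000 --- a/module/lib/thrift/transport/__init__.py +++ /dev/null @@ -1,20 +0,0 @@
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-
-__all__ = ['TTransport', 'TSocket', 'THttpClient', 'TZlibTransport']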