author     RaNaN <Mast3rRaNaN@hotmail.de>  2012-09-21 23:27:12 +0200
committer  RaNaN <Mast3rRaNaN@hotmail.de>  2012-09-21 23:27:12 +0200
commit     7007ebe7710d29f26696de16407beced6dbacb6c (patch)
tree       c2bebbe1fc243fae6622804f708c6e2c87408ca4 /module/lib
parent     merge newest changes (diff)
download   pyload-7007ebe7710d29f26696de16407beced6dbacb6c.tar.xz
added websocket backend, removed thrift
Diffstat (limited to 'module/lib')
-rw-r--r--  module/lib/mod_pywebsocket/COPYING  28
-rw-r--r--  module/lib/mod_pywebsocket/__init__.py  184
-rw-r--r--  module/lib/mod_pywebsocket/_stream_base.py  165
-rw-r--r--  module/lib/mod_pywebsocket/_stream_hixie75.py  228
-rw-r--r--  module/lib/mod_pywebsocket/_stream_hybi.py  755
-rw-r--r--  module/lib/mod_pywebsocket/common.py  304
-rw-r--r--  module/lib/mod_pywebsocket/dispatch.py  381
-rw-r--r--  module/lib/mod_pywebsocket/extensions.py  356
-rw-r--r--  module/lib/mod_pywebsocket/handshake/__init__.py  116
-rw-r--r--  module/lib/mod_pywebsocket/handshake/_base.py  219
-rw-r--r--  module/lib/mod_pywebsocket/handshake/draft75.py  190
-rw-r--r--  module/lib/mod_pywebsocket/handshake/hybi.py  372
-rw-r--r--  module/lib/mod_pywebsocket/handshake/hybi00.py  242
-rw-r--r--  module/lib/mod_pywebsocket/headerparserhandler.py  243
-rw-r--r--  module/lib/mod_pywebsocket/http_header_util.py  263
-rw-r--r--  module/lib/mod_pywebsocket/memorizingfile.py  99
-rw-r--r--  module/lib/mod_pywebsocket/msgutil.py  219
-rwxr-xr-x  module/lib/mod_pywebsocket/standalone.py  922
-rw-r--r--  module/lib/mod_pywebsocket/stream.py  56
-rw-r--r--  module/lib/mod_pywebsocket/util.py  466
-rw-r--r--  module/lib/thrift/TSCons.py  33
-rw-r--r--  module/lib/thrift/TSerialization.py  34
-rw-r--r--  module/lib/thrift/Thrift.py  154
-rw-r--r--  module/lib/thrift/__init__.py  20
-rw-r--r--  module/lib/thrift/protocol/TBase.py  72
-rw-r--r--  module/lib/thrift/protocol/TBinaryProtocol.py  259
-rw-r--r--  module/lib/thrift/protocol/TCompactProtocol.py  395
-rw-r--r--  module/lib/thrift/protocol/TProtocol.py  404
-rw-r--r--  module/lib/thrift/protocol/__init__.py  20
-rw-r--r--  module/lib/thrift/server/THttpServer.py  82
-rw-r--r--  module/lib/thrift/server/TNonblockingServer.py  310
-rw-r--r--  module/lib/thrift/server/TProcessPoolServer.py  125
-rw-r--r--  module/lib/thrift/server/TServer.py  274
-rw-r--r--  module/lib/thrift/server/__init__.py  20
-rw-r--r--  module/lib/thrift/transport/THttpClient.py  126
-rw-r--r--  module/lib/thrift/transport/TSocket.py  163
-rw-r--r--  module/lib/thrift/transport/TTransport.py  331
-rw-r--r--  module/lib/thrift/transport/TTwisted.py  219
-rw-r--r--  module/lib/thrift/transport/TZlibTransport.py  261
-rw-r--r--  module/lib/thrift/transport/__init__.py  20
40 files changed, 5808 insertions, 3322 deletions
diff --git a/module/lib/mod_pywebsocket/COPYING b/module/lib/mod_pywebsocket/COPYING
new file mode 100644
index 000000000..989d02e4c
--- /dev/null
+++ b/module/lib/mod_pywebsocket/COPYING
@@ -0,0 +1,28 @@
+Copyright 2012, Google Inc.
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/module/lib/mod_pywebsocket/__init__.py b/module/lib/mod_pywebsocket/__init__.py
new file mode 100644
index 000000000..c154da4a1
--- /dev/null
+++ b/module/lib/mod_pywebsocket/__init__.py
@@ -0,0 +1,184 @@
+# Copyright 2011, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+"""WebSocket extension for Apache HTTP Server.
+
+mod_pywebsocket is a WebSocket extension for Apache HTTP Server
+intended for testing or experimental purposes. mod_python is required.
+
+
+Installation:
+
+0. Prepare an Apache HTTP Server for which mod_python is enabled.
+
+1. Specify the following Apache HTTP Server directives to suit your
+ configuration.
+
+ If mod_pywebsocket is not in the Python path, specify the following.
+ <websock_lib> is the directory where mod_pywebsocket is installed.
+
+ PythonPath "sys.path+['<websock_lib>']"
+
+ Always specify the following. <websock_handlers> is the directory where
+ user-written WebSocket handlers are placed.
+
+ PythonOption mod_pywebsocket.handler_root <websock_handlers>
+ PythonHeaderParserHandler mod_pywebsocket.headerparserhandler
+
+ To limit the search for WebSocket handlers to a directory <scan_dir>
+ under <websock_handlers>, configure as follows:
+
+ PythonOption mod_pywebsocket.handler_scan <scan_dir>
+
+ <scan_dir> is useful in saving scan time when <websock_handlers>
+ contains many non-WebSocket handler files.
+
+ If you want to support old handshake based on
+ draft-hixie-thewebsocketprotocol-75:
+
+ PythonOption mod_pywebsocket.allow_draft75 On
+
+ If you want to allow handlers whose canonical path is not under the root
+ directory (i.e. symbolic link is in root directory but its target is not),
+ configure as follows:
+
+ PythonOption mod_pywebsocket.allow_handlers_outside_root_dir On
+
+ Example snippet of httpd.conf:
+ (mod_pywebsocket is in /websock_lib, WebSocket handlers are in
+ /websock_handlers, port is 80 for ws, 443 for wss.)
+
+ <IfModule python_module>
+ PythonPath "sys.path+['/websock_lib']"
+ PythonOption mod_pywebsocket.handler_root /websock_handlers
+ PythonHeaderParserHandler mod_pywebsocket.headerparserhandler
+ </IfModule>
+
+2. Tune Apache parameters for serving WebSocket. In particular, the TimeOut
+   directive (a core feature) and the RequestReadTimeout directive (from
+   mod_reqtimeout) should be adjusted so that idle connections are not
+   killed after only a few seconds.
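+
+   For example, illustrative values that effectively disable these timeouts
+   for long-lived connections (adjust them to your own policy) might be:
+
+       TimeOut 86400
+       RequestReadTimeout header=0 body=0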
+
+3. Verify installation. You can use example/console.html to poke the server.
+
+
+Writing WebSocket handlers:
+
+When a WebSocket request comes in, the resource name
+specified in the handshake is considered as if it is a file path under
+<websock_handlers> and the handler defined in
+<websock_handlers>/<resource_name>_wsh.py is invoked.
+
+For example, if the resource name is /example/chat, the handler defined in
+<websock_handlers>/example/chat_wsh.py is invoked.
+
+A WebSocket handler is composed of the following three functions:
+
+ web_socket_do_extra_handshake(request)
+ web_socket_transfer_data(request)
+ web_socket_passive_closing_handshake(request)
+
+where:
+ request: mod_python request.
+
+web_socket_do_extra_handshake is called during the handshake after the
+headers are successfully parsed and WebSocket properties (ws_location,
+ws_origin, and ws_resource) are added to request. A handler
+can reject the request by raising an exception.
+
+A request object has the following properties that you can use during the
+extra handshake (web_socket_do_extra_handshake):
+- ws_resource
+- ws_origin
+- ws_version
+- ws_location (Hixie 75 and HyBi 00 only)
+- ws_extensions (HyBi 06 and later)
+- ws_deflate (HyBi 06 and later)
+- ws_protocol
+- ws_requested_protocols (HyBi 06 and later)
+
+The last two are a bit tricky.
+
+For HyBi 06 and later, ws_protocol is always set to None when
+web_socket_do_extra_handshake is called. If ws_requested_protocols is not
+None, you must choose one subprotocol from this list and set it to
+ws_protocol.
+
+For Hixie 75 and HyBi 00, when web_socket_do_extra_handshake is called,
+ws_protocol is set to the value given by the client in
+Sec-WebSocket-Protocol (WebSocket-Protocol for Hixie 75) header or None if
+such header was not found in the opening handshake request. Finish extra
+handshake with ws_protocol untouched to accept the request subprotocol.
+Then, Sec-WebSocket-Protocol (or WebSocket-Protocol) header will be sent to
+the client in response with the same value as requested. Raise an exception
+in web_socket_do_extra_handshake to reject the requested subprotocol.
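+
+For example, a handshake handler for HyBi 06 and later that accepts only a
+hypothetical 'chat' subprotocol might look like this sketch:
+
+    def web_socket_do_extra_handshake(request):
+        if request.ws_requested_protocols is not None:
+            if 'chat' not in request.ws_requested_protocols:
+                raise ValueError('Unsupported subprotocol')
+            request.ws_protocol = 'chat'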
+
+web_socket_transfer_data is called after the handshake completed
+successfully. A handler can receive/send messages from/to the client
+using request. mod_pywebsocket.msgutil module provides utilities
+for data transfer.
+
+You can receive a message by the following statement.
+
+ message = request.ws_stream.receive_message()
+
+This call blocks until a complete text frame arrives; the payload data of the
+incoming frame is stored into message. When you're using the IETF HyBi 00 or
+later protocol, receive_message() returns None on receiving a client-initiated
+closing handshake. If any error occurs, receive_message() raises an exception.
+
+You can send a message by the following statement.
+
+ request.ws_stream.send_message(message)
+
+Executing the following statement, or simply returning from
+web_socket_transfer_data, closes the connection.
+
+ request.ws_stream.close_connection()
+
+When you're using the IETF HyBi 00 or later protocol, close_connection waits
+for a closing handshake acknowledgement from the client. If it cannot receive
+a valid acknowledgement, it raises an exception.
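+
+Putting these together, a minimal echo handler (a sketch that uses only the
+calls shown above) might look like:
+
+    def web_socket_transfer_data(request):
+        while True:
+            message = request.ws_stream.receive_message()
+            if message is None:
+                return
+            request.ws_stream.send_message(message)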
+
+web_socket_passive_closing_handshake is called immediately after the server
+receives an incoming closing frame from the client peer. You can specify the
+close code and reason via its return values; they are sent back to the client
+in the outgoing closing frame. A request object has the following properties
+that you can use in web_socket_passive_closing_handshake:
+- ws_close_code
+- ws_close_reason
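+
+For example, a passive closing handler that always replies with a normal
+closure (code 1000) and a short, illustrative reason might look like:
+
+    def web_socket_passive_closing_handshake(request):
+        return 1000, 'bye'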
+
+A WebSocket handler must be thread-safe if the server (Apache or
+standalone.py) is configured to use threads.
+"""
+
+
+# vi:sts=4 sw=4 et tw=72
diff --git a/module/lib/mod_pywebsocket/_stream_base.py b/module/lib/mod_pywebsocket/_stream_base.py
new file mode 100644
index 000000000..60fb33d2c
--- /dev/null
+++ b/module/lib/mod_pywebsocket/_stream_base.py
@@ -0,0 +1,165 @@
+# Copyright 2011, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+"""Base stream class.
+"""
+
+
+# Note: request.connection.write/read are used in this module, even though
+# mod_python document says that they should be used only in connection
+# handlers. Unfortunately, we have no other options. For example,
+# request.write/read are not suitable because they don't allow direct raw bytes
+# writing/reading.
+
+
+from mod_pywebsocket import util
+
+
+# Exceptions
+
+
+class ConnectionTerminatedException(Exception):
+ """This exception will be raised when a connection is terminated
+ unexpectedly.
+ """
+
+ pass
+
+
+class InvalidFrameException(ConnectionTerminatedException):
+ """This exception will be raised when we received an invalid frame we
+ cannot parse.
+ """
+
+ pass
+
+
+class BadOperationException(Exception):
+ """This exception will be raised when send_message() is called on
+ server-terminated connection or receive_message() is called on
+ client-terminated connection.
+ """
+
+ pass
+
+
+class UnsupportedFrameException(Exception):
+ """This exception will be raised when we receive a frame with flag, opcode
+ we cannot handle. Handlers can just catch and ignore this exception and
+ call receive_message() again to continue processing the next frame.
+ """
+
+ pass
+
+
+class InvalidUTF8Exception(Exception):
+ """This exception will be raised when we receive a text frame which
+ contains invalid UTF-8 strings.
+ """
+
+ pass
+
+
+class StreamBase(object):
+ """Base stream class."""
+
+ def __init__(self, request):
+ """Construct an instance.
+
+ Args:
+ request: mod_python request.
+ """
+
+ self._logger = util.get_class_logger(self)
+
+ self._request = request
+
+ def _read(self, length):
+ """Reads length bytes from connection. In case we catch any exception,
+ prepends remote address to the exception message and raise again.
+
+ Raises:
+ ConnectionTerminatedException: when read returns empty string.
+ """
+
+ bytes = self._request.connection.read(length)
+ if not bytes:
+ raise ConnectionTerminatedException(
+ 'Receiving %d byte failed. Peer (%r) closed connection' %
+ (length, (self._request.connection.remote_addr,)))
+ return bytes
+
+ def _write(self, bytes):
+ """Writes given bytes to connection. In case we catch any exception,
+ prepends remote address to the exception message and raise again.
+ """
+
+ try:
+ self._request.connection.write(bytes)
+ except Exception, e:
+ util.prepend_message_to_exception(
+ 'Failed to send message to %r: ' %
+ (self._request.connection.remote_addr,),
+ e)
+ raise
+
+ def receive_bytes(self, length):
+ """Receives multiple bytes. Retries read when we couldn't receive the
+ specified amount.
+
+ Raises:
+ ConnectionTerminatedException: when read returns empty string.
+ """
+
+ bytes = []
+ while length > 0:
+ new_bytes = self._read(length)
+ bytes.append(new_bytes)
+ length -= len(new_bytes)
+ return ''.join(bytes)
+
+ def _read_until(self, delim_char):
+ """Reads bytes until we encounter delim_char. The result will not
+ contain delim_char.
+
+ Raises:
+ ConnectionTerminatedException: when read returns empty string.
+ """
+
+ bytes = []
+ while True:
+ ch = self._read(1)
+ if ch == delim_char:
+ break
+ bytes.append(ch)
+ return ''.join(bytes)
+
+
+# vi:sts=4 sw=4 et
diff --git a/module/lib/mod_pywebsocket/_stream_hixie75.py b/module/lib/mod_pywebsocket/_stream_hixie75.py
new file mode 100644
index 000000000..c84ca6e07
--- /dev/null
+++ b/module/lib/mod_pywebsocket/_stream_hixie75.py
@@ -0,0 +1,228 @@
+# Copyright 2011, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+"""This file provides a class for parsing/building frames of the WebSocket
+protocol version HyBi 00 and Hixie 75.
+
+Specification:
+http://tools.ietf.org/html/draft-ietf-hybi-thewebsocketprotocol-00
+"""
+
+
+from mod_pywebsocket import common
+from mod_pywebsocket._stream_base import BadOperationException
+from mod_pywebsocket._stream_base import ConnectionTerminatedException
+from mod_pywebsocket._stream_base import InvalidFrameException
+from mod_pywebsocket._stream_base import StreamBase
+from mod_pywebsocket._stream_base import UnsupportedFrameException
+from mod_pywebsocket import util
+
+
+class StreamHixie75(StreamBase):
+ """A class for parsing/building frames of the WebSocket protocol version
+ HyBi 00 and Hixie 75.
+ """
+
+ def __init__(self, request, enable_closing_handshake=False):
+ """Construct an instance.
+
+ Args:
+ request: mod_python request.
+ enable_closing_handshake: to let StreamHixie75 perform closing
+ handshake as specified in HyBi 00, set
+ this option to True.
+ """
+
+ StreamBase.__init__(self, request)
+
+ self._logger = util.get_class_logger(self)
+
+ self._enable_closing_handshake = enable_closing_handshake
+
+ self._request.client_terminated = False
+ self._request.server_terminated = False
+
+ def send_message(self, message, end=True, binary=False):
+ """Send message.
+
+ Args:
+ message: unicode string to send.
+ binary: not used in hixie75.
+
+ Raises:
+ BadOperationException: when called on a server-terminated
+ connection.
+ """
+
+ if not end:
+ raise BadOperationException(
+ 'StreamHixie75 doesn\'t support send_message with end=False')
+
+ if binary:
+ raise BadOperationException(
+ 'StreamHixie75 doesn\'t support send_message with binary=True')
+
+ if self._request.server_terminated:
+ raise BadOperationException(
+ 'Requested send_message after sending out a closing handshake')
+
+ self._write(''.join(['\x00', message.encode('utf-8'), '\xff']))
+
+ def _read_payload_length_hixie75(self):
+ """Reads a length header in a Hixie75 version frame with length.
+
+ Raises:
+ ConnectionTerminatedException: when read returns empty string.
+ """
+
+ length = 0
+ while True:
+ b_str = self._read(1)
+ b = ord(b_str)
+ length = length * 128 + (b & 0x7f)
+ if (b & 0x80) == 0:
+ break
+ return length
+
+ def receive_message(self):
+ """Receive a WebSocket frame and return its payload an unicode string.
+
+ Returns:
+ payload unicode string in a WebSocket frame.
+
+ Raises:
+ ConnectionTerminatedException: when read returns empty
+ string.
+ BadOperationException: when called on a client-terminated
+ connection.
+ """
+
+ if self._request.client_terminated:
+ raise BadOperationException(
+ 'Requested receive_message after receiving a closing '
+ 'handshake')
+
+ while True:
+ # Read 1 byte.
+ # mp_conn.read will block if no bytes are available.
+ # Timeout is controlled by TimeOut directive of Apache.
+ frame_type_str = self.receive_bytes(1)
+ frame_type = ord(frame_type_str)
+ if (frame_type & 0x80) == 0x80:
+ # The payload length is specified in the frame.
+ # Read and discard.
+ length = self._read_payload_length_hixie75()
+ if length > 0:
+ _ = self.receive_bytes(length)
+ # 5.3 3. 12. if /type/ is 0xFF and /length/ is 0, then set the
+ # /client terminated/ flag and abort these steps.
+ if not self._enable_closing_handshake:
+ continue
+
+ if frame_type == 0xFF and length == 0:
+ self._request.client_terminated = True
+
+ if self._request.server_terminated:
+ self._logger.debug(
+ 'Received ack for server-initiated closing '
+ 'handshake')
+ return None
+
+ self._logger.debug(
+ 'Received client-initiated closing handshake')
+
+ self._send_closing_handshake()
+ self._logger.debug(
+ 'Sent ack for client-initiated closing handshake')
+ return None
+ else:
+ # The payload is delimited with \xff.
+ bytes = self._read_until('\xff')
+ # The WebSocket protocol section 4.4 specifies that invalid
+ # characters must be replaced with U+fffd REPLACEMENT
+ # CHARACTER.
+ message = bytes.decode('utf-8', 'replace')
+ if frame_type == 0x00:
+ return message
+ # Discard data of other types.
+
+ def _send_closing_handshake(self):
+ if not self._enable_closing_handshake:
+ raise BadOperationException(
+ 'Closing handshake is not supported in Hixie 75 protocol')
+
+ self._request.server_terminated = True
+
+ # 5.3 the server may decide to terminate the WebSocket connection by
+ # running through the following steps:
+ # 1. send a 0xFF byte and a 0x00 byte to the client to indicate the
+ # start of the closing handshake.
+ self._write('\xff\x00')
+
+ def close_connection(self, unused_code='', unused_reason=''):
+ """Closes a WebSocket connection.
+
+ Raises:
+            ConnectionTerminatedException: when closing handshake was
+                not successful.
+ """
+
+ if self._request.server_terminated:
+ self._logger.debug(
+ 'Requested close_connection but server is already terminated')
+ return
+
+ if not self._enable_closing_handshake:
+ self._request.server_terminated = True
+ self._logger.debug('Connection closed')
+ return
+
+ self._send_closing_handshake()
+ self._logger.debug('Sent server-initiated closing handshake')
+
+ # TODO(ukai): 2. wait until the /client terminated/ flag has been set,
+ # or until a server-defined timeout expires.
+ #
+ # For now, we expect receiving closing handshake right after sending
+ # out closing handshake, and if we couldn't receive non-handshake
+ # frame, we take it as ConnectionTerminatedException.
+ message = self.receive_message()
+ if message is not None:
+ raise ConnectionTerminatedException(
+ 'Didn\'t receive valid ack for closing handshake')
+ # TODO: 3. close the WebSocket connection.
+ # note: mod_python Connection (mp_conn) doesn't have close method.
+
+ def send_ping(self, body):
+ raise BadOperationException(
+ 'StreamHixie75 doesn\'t support send_ping')
+
+
+# vi:sts=4 sw=4 et
diff --git a/module/lib/mod_pywebsocket/_stream_hybi.py b/module/lib/mod_pywebsocket/_stream_hybi.py
new file mode 100644
index 000000000..34fa7a60e
--- /dev/null
+++ b/module/lib/mod_pywebsocket/_stream_hybi.py
@@ -0,0 +1,755 @@
+# Copyright 2012, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+"""This file provides classes and helper functions for parsing/building frames
+of the WebSocket protocol (RFC 6455).
+
+Specification:
+http://tools.ietf.org/html/rfc6455
+"""
+
+
+from collections import deque
+import os
+import struct
+import time
+
+from mod_pywebsocket import common
+from mod_pywebsocket import util
+from mod_pywebsocket._stream_base import BadOperationException
+from mod_pywebsocket._stream_base import ConnectionTerminatedException
+from mod_pywebsocket._stream_base import InvalidFrameException
+from mod_pywebsocket._stream_base import InvalidUTF8Exception
+from mod_pywebsocket._stream_base import StreamBase
+from mod_pywebsocket._stream_base import UnsupportedFrameException
+
+
+_NOOP_MASKER = util.NoopMasker()
+
+
+class Frame(object):
+
+ def __init__(self, fin=1, rsv1=0, rsv2=0, rsv3=0,
+ opcode=None, payload=''):
+ self.fin = fin
+ self.rsv1 = rsv1
+ self.rsv2 = rsv2
+ self.rsv3 = rsv3
+ self.opcode = opcode
+ self.payload = payload
+
+
+# Helper functions made public to be used for writing unittests for WebSocket
+# clients.
+
+
+def create_length_header(length, mask):
+ """Creates a length header.
+
+ Args:
+ length: Frame length. Must be less than 2^63.
+ mask: Mask bit. Must be boolean.
+
+ Raises:
+ ValueError: when bad data is given.
+ """
+
+ if mask:
+ mask_bit = 1 << 7
+ else:
+ mask_bit = 0
+
+ if length < 0:
+ raise ValueError('length must be non negative integer')
+ elif length <= 125:
+ return chr(mask_bit | length)
+ elif length < (1 << 16):
+ return chr(mask_bit | 126) + struct.pack('!H', length)
+ elif length < (1 << 63):
+ return chr(mask_bit | 127) + struct.pack('!Q', length)
+ else:
+ raise ValueError('Payload is too big for one frame')
+
+
+def create_header(opcode, payload_length, fin, rsv1, rsv2, rsv3, mask):
+ """Creates a frame header.
+
+ Raises:
+ Exception: when bad data is given.
+ """
+
+ if opcode < 0 or 0xf < opcode:
+ raise ValueError('Opcode out of range')
+
+ if payload_length < 0 or (1 << 63) <= payload_length:
+ raise ValueError('payload_length out of range')
+
+ if (fin | rsv1 | rsv2 | rsv3) & ~1:
+ raise ValueError('FIN bit and Reserved bit parameter must be 0 or 1')
+
+ header = ''
+
+ first_byte = ((fin << 7)
+ | (rsv1 << 6) | (rsv2 << 5) | (rsv3 << 4)
+ | opcode)
+ header += chr(first_byte)
+ header += create_length_header(payload_length, mask)
+
+ return header
+
+
+def _build_frame(header, body, mask):
+ if not mask:
+ return header + body
+
+ masking_nonce = os.urandom(4)
+ masker = util.RepeatedXorMasker(masking_nonce)
+
+ return header + masking_nonce + masker.mask(body)
+
+
+def _filter_and_format_frame_object(frame, mask, frame_filters):
+ for frame_filter in frame_filters:
+ frame_filter.filter(frame)
+
+ header = create_header(
+ frame.opcode, len(frame.payload), frame.fin,
+ frame.rsv1, frame.rsv2, frame.rsv3, mask)
+ return _build_frame(header, frame.payload, mask)
+
+
+def create_binary_frame(
+ message, opcode=common.OPCODE_BINARY, fin=1, mask=False, frame_filters=[]):
+ """Creates a simple binary frame with no extension, reserved bit."""
+
+ frame = Frame(fin=fin, opcode=opcode, payload=message)
+ return _filter_and_format_frame_object(frame, mask, frame_filters)
+
+
+def create_text_frame(
+ message, opcode=common.OPCODE_TEXT, fin=1, mask=False, frame_filters=[]):
+ """Creates a simple text frame with no extension, reserved bit."""
+
+ encoded_message = message.encode('utf-8')
+ return create_binary_frame(encoded_message, opcode, fin, mask,
+ frame_filters)
+
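+# For example, a client-side test using the helpers above might build a
+# masked text frame like this (a sketch; the payload is illustrative):
+#
+#     frame_data = create_text_frame(u'hello', mask=True)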
+
+class FragmentedFrameBuilder(object):
+ """A stateful class to send a message as fragments."""
+
+ def __init__(self, mask, frame_filters=[]):
+ """Constructs an instance."""
+
+ self._mask = mask
+ self._frame_filters = frame_filters
+
+ self._started = False
+
+ # Hold opcode of the first frame in messages to verify types of other
+ # frames in the message are all the same.
+ self._opcode = common.OPCODE_TEXT
+
+ def build(self, message, end, binary):
+ if binary:
+ frame_type = common.OPCODE_BINARY
+ else:
+ frame_type = common.OPCODE_TEXT
+ if self._started:
+ if self._opcode != frame_type:
+ raise ValueError('Message types are different in frames for '
+ 'the same message')
+ opcode = common.OPCODE_CONTINUATION
+ else:
+ opcode = frame_type
+ self._opcode = frame_type
+
+ if end:
+ self._started = False
+ fin = 1
+ else:
+ self._started = True
+ fin = 0
+
+ if binary:
+ return create_binary_frame(
+ message, opcode, fin, self._mask, self._frame_filters)
+ else:
+ return create_text_frame(
+ message, opcode, fin, self._mask, self._frame_filters)
+
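+# For example, sending one text message as two fragments (a sketch;
+# the strings are illustrative):
+#
+#     builder = FragmentedFrameBuilder(mask=False)
+#     first_frame = builder.build(u'Hello, ', end=False, binary=False)
+#     last_frame = builder.build(u'world!', end=True, binary=False)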
+
+def _create_control_frame(opcode, body, mask, frame_filters):
+ frame = Frame(opcode=opcode, payload=body)
+
+ for frame_filter in frame_filters:
+ frame_filter.filter(frame)
+
+ if len(frame.payload) > 125:
+ raise BadOperationException(
+ 'Payload data size of control frames must be 125 bytes or less')
+
+ header = create_header(
+ frame.opcode, len(frame.payload), frame.fin,
+ frame.rsv1, frame.rsv2, frame.rsv3, mask)
+ return _build_frame(header, frame.payload, mask)
+
+
+def create_ping_frame(body, mask=False, frame_filters=[]):
+ return _create_control_frame(common.OPCODE_PING, body, mask, frame_filters)
+
+
+def create_pong_frame(body, mask=False, frame_filters=[]):
+ return _create_control_frame(common.OPCODE_PONG, body, mask, frame_filters)
+
+
+def create_close_frame(body, mask=False, frame_filters=[]):
+ return _create_control_frame(
+ common.OPCODE_CLOSE, body, mask, frame_filters)
+
+
+class StreamOptions(object):
+ """Holds option values to configure Stream objects."""
+
+ def __init__(self):
+ """Constructs StreamOptions."""
+
+ # Enables deflate-stream extension.
+ self.deflate_stream = False
+
+ # Filters applied to frames.
+ self.outgoing_frame_filters = []
+ self.incoming_frame_filters = []
+
+ self.mask_send = False
+ self.unmask_receive = True
+
+
+class Stream(StreamBase):
+ """A class for parsing/building frames of the WebSocket protocol
+ (RFC 6455).
+ """
+
+ def __init__(self, request, options):
+ """Constructs an instance.
+
+ Args:
+ request: mod_python request.
+ """
+
+ StreamBase.__init__(self, request)
+
+ self._logger = util.get_class_logger(self)
+
+ self._options = options
+
+ if self._options.deflate_stream:
+ self._logger.debug('Setup filter for deflate-stream')
+ self._request = util.DeflateRequest(self._request)
+
+ self._request.client_terminated = False
+ self._request.server_terminated = False
+
+ # Holds body of received fragments.
+ self._received_fragments = []
+ # Holds the opcode of the first fragment.
+ self._original_opcode = None
+
+ self._writer = FragmentedFrameBuilder(
+ self._options.mask_send, self._options.outgoing_frame_filters)
+
+ self._ping_queue = deque()
+
+ def _receive_frame(self):
+ """Receives a frame and return data in the frame as a tuple containing
+ each header field and payload separately.
+
+ Raises:
+ ConnectionTerminatedException: when read returns empty
+ string.
+ InvalidFrameException: when the frame contains invalid data.
+ """
+
+ self._logger.log(common.LOGLEVEL_FINE,
+ 'Receive the first 2 octets of a frame')
+
+ received = self.receive_bytes(2)
+
+ first_byte = ord(received[0])
+ fin = (first_byte >> 7) & 1
+ rsv1 = (first_byte >> 6) & 1
+ rsv2 = (first_byte >> 5) & 1
+ rsv3 = (first_byte >> 4) & 1
+ opcode = first_byte & 0xf
+
+ second_byte = ord(received[1])
+ mask = (second_byte >> 7) & 1
+ payload_length = second_byte & 0x7f
+
+ self._logger.log(common.LOGLEVEL_FINE,
+ 'FIN=%s, RSV1=%s, RSV2=%s, RSV3=%s, opcode=%s, '
+ 'Mask=%s, Payload_length=%s',
+ fin, rsv1, rsv2, rsv3, opcode, mask, payload_length)
+
+ if (mask == 1) != self._options.unmask_receive:
+ raise InvalidFrameException(
+                'Mask bit on the received frame didn\'t match masking '
+ 'configuration for received frames')
+
+ # The Hybi-13 and later specs disallow putting a value in 0x0-0xFFFF
+ # into the 8-octet extended payload length field (or 0x0-0xFD in
+ # 2-octet field).
+ valid_length_encoding = True
+ length_encoding_bytes = 1
+ if payload_length == 127:
+ self._logger.log(common.LOGLEVEL_FINE,
+ 'Receive 8-octet extended payload length')
+
+ extended_payload_length = self.receive_bytes(8)
+ payload_length = struct.unpack(
+ '!Q', extended_payload_length)[0]
+ if payload_length > 0x7FFFFFFFFFFFFFFF:
+ raise InvalidFrameException(
+ 'Extended payload length >= 2^63')
+ if self._request.ws_version >= 13 and payload_length < 0x10000:
+ valid_length_encoding = False
+ length_encoding_bytes = 8
+
+ self._logger.log(common.LOGLEVEL_FINE,
+ 'Decoded_payload_length=%s', payload_length)
+ elif payload_length == 126:
+ self._logger.log(common.LOGLEVEL_FINE,
+ 'Receive 2-octet extended payload length')
+
+ extended_payload_length = self.receive_bytes(2)
+ payload_length = struct.unpack(
+ '!H', extended_payload_length)[0]
+ if self._request.ws_version >= 13 and payload_length < 126:
+ valid_length_encoding = False
+ length_encoding_bytes = 2
+
+ self._logger.log(common.LOGLEVEL_FINE,
+ 'Decoded_payload_length=%s', payload_length)
+
+ if not valid_length_encoding:
+ self._logger.warning(
+ 'Payload length is not encoded using the minimal number of '
+ 'bytes (%d is encoded using %d bytes)',
+ payload_length,
+ length_encoding_bytes)
+
+ if mask == 1:
+ self._logger.log(common.LOGLEVEL_FINE, 'Receive mask')
+
+ masking_nonce = self.receive_bytes(4)
+ masker = util.RepeatedXorMasker(masking_nonce)
+
+ self._logger.log(common.LOGLEVEL_FINE, 'Mask=%r', masking_nonce)
+ else:
+ masker = _NOOP_MASKER
+
+ self._logger.log(common.LOGLEVEL_FINE, 'Receive payload data')
+ if self._logger.isEnabledFor(common.LOGLEVEL_FINE):
+ receive_start = time.time()
+
+ raw_payload_bytes = self.receive_bytes(payload_length)
+
+ if self._logger.isEnabledFor(common.LOGLEVEL_FINE):
+ self._logger.log(
+ common.LOGLEVEL_FINE,
+ 'Done receiving payload data at %s MB/s',
+ payload_length / (time.time() - receive_start) / 1000 / 1000)
+ self._logger.log(common.LOGLEVEL_FINE, 'Unmask payload data')
+
+ if self._logger.isEnabledFor(common.LOGLEVEL_FINE):
+ unmask_start = time.time()
+
+ bytes = masker.mask(raw_payload_bytes)
+
+ if self._logger.isEnabledFor(common.LOGLEVEL_FINE):
+ self._logger.log(
+ common.LOGLEVEL_FINE,
+ 'Done unmasking payload data at %s MB/s',
+ payload_length / (time.time() - unmask_start) / 1000 / 1000)
+
+ return opcode, bytes, fin, rsv1, rsv2, rsv3
+
+ def _receive_frame_as_frame_object(self):
+ opcode, bytes, fin, rsv1, rsv2, rsv3 = self._receive_frame()
+
+ return Frame(fin=fin, rsv1=rsv1, rsv2=rsv2, rsv3=rsv3,
+ opcode=opcode, payload=bytes)
+
+ def send_message(self, message, end=True, binary=False):
+ """Send message.
+
+ Args:
+ message: text in unicode or binary in str to send.
+ binary: send message as binary frame.
+
+ Raises:
+ BadOperationException: when called on a server-terminated
+ connection or called with inconsistent message type or
+ binary parameter.
+ """
+
+ if self._request.server_terminated:
+ raise BadOperationException(
+ 'Requested send_message after sending out a closing handshake')
+
+ if binary and isinstance(message, unicode):
+ raise BadOperationException(
+ 'Message for binary frame must be instance of str')
+
+ try:
+ self._write(self._writer.build(message, end, binary))
+ except ValueError, e:
+ raise BadOperationException(e)
+
+ def receive_message(self):
+ """Receive a WebSocket frame and return its payload as a text in
+ unicode or a binary in str.
+
+ Returns:
+ payload data of the frame
+ - as unicode instance if received text frame
+ - as str instance if received binary frame
+ or None iff received closing handshake.
+ Raises:
+ BadOperationException: when called on a client-terminated
+ connection.
+ ConnectionTerminatedException: when read returns empty
+ string.
+ InvalidFrameException: when the frame contains invalid
+ data.
+ UnsupportedFrameException: when the received frame has
+ flags, opcode we cannot handle. You can ignore this
+ exception and continue receiving the next frame.
+ """
+
+ if self._request.client_terminated:
+ raise BadOperationException(
+ 'Requested receive_message after receiving a closing '
+ 'handshake')
+
+ while True:
+ # mp_conn.read will block if no bytes are available.
+ # Timeout is controlled by TimeOut directive of Apache.
+
+ frame = self._receive_frame_as_frame_object()
+
+ # Check the constraint on the payload size for control frames
+ # before extension processes the frame.
+ # See also http://tools.ietf.org/html/rfc6455#section-5.5
+ if (common.is_control_opcode(frame.opcode) and
+ len(frame.payload) > 125):
+ raise InvalidFrameException(
+ 'Payload data size of control frames must be 125 bytes or '
+ 'less')
+
+ for frame_filter in self._options.incoming_frame_filters:
+ frame_filter.filter(frame)
+
+ if frame.rsv1 or frame.rsv2 or frame.rsv3:
+ raise UnsupportedFrameException(
+ 'Unsupported flag is set (rsv = %d%d%d)' %
+ (frame.rsv1, frame.rsv2, frame.rsv3))
+
+ if frame.opcode == common.OPCODE_CONTINUATION:
+ if not self._received_fragments:
+ if frame.fin:
+ raise InvalidFrameException(
+ 'Received a termination frame but fragmentation '
+ 'not started')
+ else:
+ raise InvalidFrameException(
+ 'Received an intermediate frame but '
+ 'fragmentation not started')
+
+ if frame.fin:
+ # End of fragmentation frame
+ self._received_fragments.append(frame.payload)
+ message = ''.join(self._received_fragments)
+ self._received_fragments = []
+ else:
+ # Intermediate frame
+ self._received_fragments.append(frame.payload)
+ continue
+ else:
+ if self._received_fragments:
+ if frame.fin:
+ raise InvalidFrameException(
+ 'Received an unfragmented frame without '
+ 'terminating existing fragmentation')
+ else:
+ raise InvalidFrameException(
+ 'New fragmentation started without terminating '
+ 'existing fragmentation')
+
+ if frame.fin:
+ # Unfragmented frame
+
+ self._original_opcode = frame.opcode
+ message = frame.payload
+ else:
+ # Start of fragmentation frame
+
+ if common.is_control_opcode(frame.opcode):
+ raise InvalidFrameException(
+ 'Control frames must not be fragmented')
+
+ self._original_opcode = frame.opcode
+ self._received_fragments.append(frame.payload)
+ continue
+
+ if self._original_opcode == common.OPCODE_TEXT:
+ # The WebSocket protocol section 4.4 specifies that invalid
+ # characters must be replaced with U+fffd REPLACEMENT
+ # CHARACTER.
+ try:
+ return message.decode('utf-8')
+ except UnicodeDecodeError, e:
+ raise InvalidUTF8Exception(e)
+ elif self._original_opcode == common.OPCODE_BINARY:
+ return message
+ elif self._original_opcode == common.OPCODE_CLOSE:
+ self._request.client_terminated = True
+
+ # Status code is optional. We can have status reason only if we
+ # have status code. Status reason can be empty string. So,
+ # allowed cases are
+ # - no application data: no code no reason
+ # - 2 octet of application data: has code but no reason
+ # - 3 or more octet of application data: both code and reason
+ if len(message) == 0:
+ self._logger.debug('Received close frame (empty body)')
+ self._request.ws_close_code = (
+ common.STATUS_NO_STATUS_RECEIVED)
+ elif len(message) == 1:
+ raise InvalidFrameException(
+ 'If a close frame has status code, the length of '
+ 'status code must be 2 octet')
+ elif len(message) >= 2:
+ self._request.ws_close_code = struct.unpack(
+ '!H', message[0:2])[0]
+ self._request.ws_close_reason = message[2:].decode(
+ 'utf-8', 'replace')
+ self._logger.debug(
+ 'Received close frame (code=%d, reason=%r)',
+ self._request.ws_close_code,
+ self._request.ws_close_reason)
+
+ # Drain junk data after the close frame if necessary.
+ self._drain_received_data()
+
+ if self._request.server_terminated:
+ self._logger.debug(
+ 'Received ack for server-initiated closing handshake')
+ return None
+
+ self._logger.debug(
+ 'Received client-initiated closing handshake')
+
+ code = common.STATUS_NORMAL_CLOSURE
+ reason = ''
+ if hasattr(self._request, '_dispatcher'):
+ dispatcher = self._request._dispatcher
+ code, reason = dispatcher.passive_closing_handshake(
+ self._request)
+ if code is None and reason is not None and len(reason) > 0:
+ self._logger.warning(
+ 'Handler specified reason despite code being None')
+ reason = ''
+ if reason is None:
+ reason = ''
+ self._send_closing_handshake(code, reason)
+ self._logger.debug(
+ 'Sent ack for client-initiated closing handshake '
+ '(code=%r, reason=%r)', code, reason)
+ return None
+ elif self._original_opcode == common.OPCODE_PING:
+ try:
+ handler = self._request.on_ping_handler
+ if handler:
+ handler(self._request, message)
+ continue
+ except AttributeError, e:
+ pass
+ self._send_pong(message)
+ elif self._original_opcode == common.OPCODE_PONG:
+ # TODO(tyoshino): Add ping timeout handling.
+
+ inflight_pings = deque()
+
+ while True:
+ try:
+ expected_body = self._ping_queue.popleft()
+ if expected_body == message:
+ # inflight_pings contains pings ignored by the
+ # other peer. Just forget them.
+ self._logger.debug(
+ 'Ping %r is acked (%d pings were ignored)',
+ expected_body, len(inflight_pings))
+ break
+ else:
+ inflight_pings.append(expected_body)
+ except IndexError, e:
+ # The received pong was unsolicited pong. Keep the
+ # ping queue as is.
+ self._ping_queue = inflight_pings
+                        self._logger.debug('Received an unsolicited pong')
+ break
+
+ try:
+ handler = self._request.on_pong_handler
+ if handler:
+ handler(self._request, message)
+ continue
+ except AttributeError, e:
+ pass
+
+ continue
+ else:
+ raise UnsupportedFrameException(
+ 'Opcode %d is not supported' % self._original_opcode)
+
+ def _send_closing_handshake(self, code, reason):
+ body = ''
+ if code is not None:
+ if (code > common.STATUS_USER_PRIVATE_MAX or
+ code < common.STATUS_NORMAL_CLOSURE):
+ raise BadOperationException('Status code is out of range')
+ if (code == common.STATUS_NO_STATUS_RECEIVED or
+ code == common.STATUS_ABNORMAL_CLOSURE or
+ code == common.STATUS_TLS_HANDSHAKE):
+ raise BadOperationException('Status code is reserved pseudo '
+ 'code')
+ encoded_reason = reason.encode('utf-8')
+ body = struct.pack('!H', code) + encoded_reason
+
+ frame = create_close_frame(
+ body,
+ self._options.mask_send,
+ self._options.outgoing_frame_filters)
+
+ self._request.server_terminated = True
+
+ self._write(frame)
+
+ def close_connection(self, code=common.STATUS_NORMAL_CLOSURE, reason=''):
+ """Closes a WebSocket connection.
+
+ Args:
+ code: Status code for close frame. If code is None, a close
+ frame with empty body will be sent.
+ reason: string representing close reason.
+ Raises:
+ BadOperationException: when reason is specified with code None
+ or reason is not an instance of both str and unicode.
+ """
+
+ if self._request.server_terminated:
+ self._logger.debug(
+ 'Requested close_connection but server is already terminated')
+ return
+
+ if code is None:
+ if reason is not None and len(reason) > 0:
+ raise BadOperationException(
+ 'close reason must not be specified if code is None')
+ reason = ''
+ else:
+ if not isinstance(reason, str) and not isinstance(reason, unicode):
+ raise BadOperationException(
+ 'close reason must be an instance of str or unicode')
+
+ self._send_closing_handshake(code, reason)
+ self._logger.debug(
+ 'Sent server-initiated closing handshake (code=%r, reason=%r)',
+ code, reason)
+
+ if (code == common.STATUS_GOING_AWAY or
+ code == common.STATUS_PROTOCOL_ERROR):
+ # It doesn't make sense to wait for a close frame if the reason is
+ # protocol error or that the server is going away. For some of
+ # other reasons, it might not make sense to wait for a close frame,
+ # but it's not clear, yet.
+ return
+
+ # TODO(ukai): 2. wait until the /client terminated/ flag has been set,
+ # or until a server-defined timeout expires.
+ #
+ # For now, we expect receiving closing handshake right after sending
+ # out closing handshake.
+ message = self.receive_message()
+ if message is not None:
+ raise ConnectionTerminatedException(
+ 'Didn\'t receive valid ack for closing handshake')
+ # TODO: 3. close the WebSocket connection.
+ # note: mod_python Connection (mp_conn) doesn't have close method.
+
+ def send_ping(self, body=''):
+ frame = create_ping_frame(
+ body,
+ self._options.mask_send,
+ self._options.outgoing_frame_filters)
+ self._write(frame)
+
+ self._ping_queue.append(body)
+
+ def _send_pong(self, body):
+ frame = create_pong_frame(
+ body,
+ self._options.mask_send,
+ self._options.outgoing_frame_filters)
+ self._write(frame)
+
+ def _drain_received_data(self):
+ """Drains unread data in the receive buffer to avoid sending out TCP
+ RST packet. This is because when deflate-stream is enabled, some
+ DEFLATE block for flushing data may follow a close frame. If any data
+ remains in the receive buffer of a socket when the socket is closed,
+ it sends out TCP RST packet to the other peer.
+
+ Since mod_python's mp_conn object doesn't support non-blocking read,
+ we perform this only when pywebsocket is running in standalone mode.
+ """
+
+ # If self._options.deflate_stream is true, self._request is
+ # DeflateRequest, so we can get wrapped request object by
+ # self._request._request.
+ #
+ # Only _StandaloneRequest has _drain_received_data method.
+ if (self._options.deflate_stream and
+ ('_drain_received_data' in dir(self._request._request))):
+ self._request._request._drain_received_data()
+
+
+# vi:sts=4 sw=4 et
diff --git a/module/lib/mod_pywebsocket/common.py b/module/lib/mod_pywebsocket/common.py
new file mode 100644
index 000000000..710967c80
--- /dev/null
+++ b/module/lib/mod_pywebsocket/common.py
@@ -0,0 +1,304 @@
+# Copyright 2012, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+"""This file must not depend on any module specific to the WebSocket protocol.
+"""
+
+
+from mod_pywebsocket import http_header_util
+
+
+# Additional log level definitions.
+LOGLEVEL_FINE = 9
+
+# Constants indicating WebSocket protocol version.
+VERSION_HIXIE75 = -1
+VERSION_HYBI00 = 0
+VERSION_HYBI01 = 1
+VERSION_HYBI02 = 2
+VERSION_HYBI03 = 2
+VERSION_HYBI04 = 4
+VERSION_HYBI05 = 5
+VERSION_HYBI06 = 6
+VERSION_HYBI07 = 7
+VERSION_HYBI08 = 8
+VERSION_HYBI09 = 8
+VERSION_HYBI10 = 8
+VERSION_HYBI11 = 8
+VERSION_HYBI12 = 8
+VERSION_HYBI13 = 13
+VERSION_HYBI14 = 13
+VERSION_HYBI15 = 13
+VERSION_HYBI16 = 13
+VERSION_HYBI17 = 13
+
+# Constants indicating WebSocket protocol latest version.
+VERSION_HYBI_LATEST = VERSION_HYBI13
+
+# Port numbers
+DEFAULT_WEB_SOCKET_PORT = 80
+DEFAULT_WEB_SOCKET_SECURE_PORT = 443
+
+# Schemes
+WEB_SOCKET_SCHEME = 'ws'
+WEB_SOCKET_SECURE_SCHEME = 'wss'
+
+# Frame opcodes defined in the spec.
+OPCODE_CONTINUATION = 0x0
+OPCODE_TEXT = 0x1
+OPCODE_BINARY = 0x2
+OPCODE_CLOSE = 0x8
+OPCODE_PING = 0x9
+OPCODE_PONG = 0xa
+
+# UUIDs used by HyBi 04 and later opening handshake and frame masking.
+WEBSOCKET_ACCEPT_UUID = '258EAFA5-E914-47DA-95CA-C5AB0DC85B11'
+
+# Opening handshake header names and expected values.
+UPGRADE_HEADER = 'Upgrade'
+WEBSOCKET_UPGRADE_TYPE = 'websocket'
+WEBSOCKET_UPGRADE_TYPE_HIXIE75 = 'WebSocket'
+CONNECTION_HEADER = 'Connection'
+UPGRADE_CONNECTION_TYPE = 'Upgrade'
+HOST_HEADER = 'Host'
+ORIGIN_HEADER = 'Origin'
+SEC_WEBSOCKET_ORIGIN_HEADER = 'Sec-WebSocket-Origin'
+SEC_WEBSOCKET_KEY_HEADER = 'Sec-WebSocket-Key'
+SEC_WEBSOCKET_ACCEPT_HEADER = 'Sec-WebSocket-Accept'
+SEC_WEBSOCKET_VERSION_HEADER = 'Sec-WebSocket-Version'
+SEC_WEBSOCKET_PROTOCOL_HEADER = 'Sec-WebSocket-Protocol'
+SEC_WEBSOCKET_EXTENSIONS_HEADER = 'Sec-WebSocket-Extensions'
+SEC_WEBSOCKET_DRAFT_HEADER = 'Sec-WebSocket-Draft'
+SEC_WEBSOCKET_KEY1_HEADER = 'Sec-WebSocket-Key1'
+SEC_WEBSOCKET_KEY2_HEADER = 'Sec-WebSocket-Key2'
+SEC_WEBSOCKET_LOCATION_HEADER = 'Sec-WebSocket-Location'
+
+# Extensions
+DEFLATE_STREAM_EXTENSION = 'deflate-stream'
+DEFLATE_FRAME_EXTENSION = 'deflate-frame'
+PERFRAME_COMPRESSION_EXTENSION = 'perframe-compress'
+X_WEBKIT_DEFLATE_FRAME_EXTENSION = 'x-webkit-deflate-frame'
+
+# Status codes
+# Code STATUS_NO_STATUS_RECEIVED, STATUS_ABNORMAL_CLOSURE, and
+# STATUS_TLS_HANDSHAKE are pseudo codes to indicate specific error cases.
+# They cannot be used as codes in actual closing frames.
+# Application level errors must use codes in the range
+# STATUS_USER_REGISTERED_BASE to STATUS_USER_PRIVATE_MAX. The codes in the
+# range STATUS_USER_REGISTERED_BASE to STATUS_USER_REGISTERED_MAX are managed
+# by IANA. Usually application must define user protocol level errors in the
+# range STATUS_USER_PRIVATE_BASE to STATUS_USER_PRIVATE_MAX.
+STATUS_NORMAL_CLOSURE = 1000
+STATUS_GOING_AWAY = 1001
+STATUS_PROTOCOL_ERROR = 1002
+STATUS_UNSUPPORTED_DATA = 1003
+STATUS_NO_STATUS_RECEIVED = 1005
+STATUS_ABNORMAL_CLOSURE = 1006
+STATUS_INVALID_FRAME_PAYLOAD_DATA = 1007
+STATUS_POLICY_VIOLATION = 1008
+STATUS_MESSAGE_TOO_BIG = 1009
+STATUS_MANDATORY_EXTENSION = 1010
+STATUS_INTERNAL_SERVER_ERROR = 1011
+STATUS_TLS_HANDSHAKE = 1015
+STATUS_USER_REGISTERED_BASE = 3000
+STATUS_USER_REGISTERED_MAX = 3999
+STATUS_USER_PRIVATE_BASE = 4000
+STATUS_USER_PRIVATE_MAX = 4999
+# Following definitions are aliases to keep compatibility. Applications must
+# not use these obsoleted definitions anymore.
+STATUS_NORMAL = STATUS_NORMAL_CLOSURE
+STATUS_UNSUPPORTED = STATUS_UNSUPPORTED_DATA
+STATUS_CODE_NOT_AVAILABLE = STATUS_NO_STATUS_RECEIVED
+STATUS_ABNORMAL_CLOSE = STATUS_ABNORMAL_CLOSURE
+STATUS_INVALID_FRAME_PAYLOAD = STATUS_INVALID_FRAME_PAYLOAD_DATA
+STATUS_MANDATORY_EXT = STATUS_MANDATORY_EXTENSION
+
+# HTTP status codes
+HTTP_STATUS_BAD_REQUEST = 400
+HTTP_STATUS_FORBIDDEN = 403
+HTTP_STATUS_NOT_FOUND = 404
+
+
+def is_control_opcode(opcode):
+ return (opcode >> 3) == 1
+
+
+class ExtensionParameter(object):
+ """Holds information about an extension which is exchanged on extension
+ negotiation in opening handshake.
+ """
+
+ def __init__(self, name):
+ self._name = name
+ # TODO(tyoshino): Change the data structure to more efficient one such
+ # as dict when the spec changes to say like
+ # - Parameter names must be unique
+ # - The order of parameters is not significant
+ self._parameters = []
+
+ def name(self):
+ return self._name
+
+ def add_parameter(self, name, value):
+ self._parameters.append((name, value))
+
+ def get_parameters(self):
+ return self._parameters
+
+ def get_parameter_names(self):
+ return [name for name, unused_value in self._parameters]
+
+ def has_parameter(self, name):
+ for param_name, param_value in self._parameters:
+ if param_name == name:
+ return True
+ return False
+
+ def get_parameter_value(self, name):
+ for param_name, param_value in self._parameters:
+ if param_name == name:
+ return param_value
+
+
+class ExtensionParsingException(Exception):
+ def __init__(self, name):
+ super(ExtensionParsingException, self).__init__(name)
+
+
+def _parse_extension_param(state, definition, allow_quoted_string):
+ param_name = http_header_util.consume_token(state)
+
+ if param_name is None:
+ raise ExtensionParsingException('No valid parameter name found')
+
+ http_header_util.consume_lwses(state)
+
+ if not http_header_util.consume_string(state, '='):
+ definition.add_parameter(param_name, None)
+ return
+
+ http_header_util.consume_lwses(state)
+
+ if allow_quoted_string:
+ # TODO(toyoshim): Add code to validate that parsed param_value is token
+ param_value = http_header_util.consume_token_or_quoted_string(state)
+ else:
+ param_value = http_header_util.consume_token(state)
+ if param_value is None:
+ raise ExtensionParsingException(
+ 'No valid parameter value found on the right-hand side of '
+ 'parameter %r' % param_name)
+
+ definition.add_parameter(param_name, param_value)
+
+
+def _parse_extension(state, allow_quoted_string):
+ extension_token = http_header_util.consume_token(state)
+ if extension_token is None:
+ return None
+
+ extension = ExtensionParameter(extension_token)
+
+ while True:
+ http_header_util.consume_lwses(state)
+
+ if not http_header_util.consume_string(state, ';'):
+ break
+
+ http_header_util.consume_lwses(state)
+
+ try:
+ _parse_extension_param(state, extension, allow_quoted_string)
+ except ExtensionParsingException, e:
+ raise ExtensionParsingException(
+ 'Failed to parse parameter for %r (%r)' %
+ (extension_token, e))
+
+ return extension
+
+
+def parse_extensions(data, allow_quoted_string=False):
+ """Parses Sec-WebSocket-Extensions header value returns a list of
+ ExtensionParameter objects.
+
+ Leading LWSes must be trimmed.
+ """
+
+ state = http_header_util.ParsingState(data)
+
+ extension_list = []
+ while True:
+ extension = _parse_extension(state, allow_quoted_string)
+ if extension is not None:
+ extension_list.append(extension)
+
+ http_header_util.consume_lwses(state)
+
+ if http_header_util.peek(state) is None:
+ break
+
+ if not http_header_util.consume_string(state, ','):
+ raise ExtensionParsingException(
+ 'Failed to parse Sec-WebSocket-Extensions header: '
+ 'Expected a comma but found %r' %
+ http_header_util.peek(state))
+
+ http_header_util.consume_lwses(state)
+
+ if len(extension_list) == 0:
+ raise ExtensionParsingException(
+ 'No valid extension entry found')
+
+ return extension_list
+
+
+def format_extension(extension):
+ """Formats an ExtensionParameter object."""
+
+ formatted_params = [extension.name()]
+ for param_name, param_value in extension.get_parameters():
+ if param_value is None:
+ formatted_params.append(param_name)
+ else:
+ quoted_value = http_header_util.quote_if_necessary(param_value)
+ formatted_params.append('%s=%s' % (param_name, quoted_value))
+ return '; '.join(formatted_params)
+
+
+def format_extensions(extension_list):
+ """Formats a list of ExtensionParameter objects."""
+
+ formatted_extension_list = []
+ for extension in extension_list:
+ formatted_extension_list.append(format_extension(extension))
+ return ', '.join(formatted_extension_list)
+
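+
+# Illustrative sketch (not used by the library): round-tripping a hypothetical
+# Sec-WebSocket-Extensions value through the helpers above.
+def _example_extension_roundtrip():
+    extensions = parse_extensions(
+        'deflate-frame; max_window_bits=10; no_context_takeover, x-foo')
+    assert extensions[0].name() == 'deflate-frame'
+    assert extensions[0].get_parameter_value('max_window_bits') == '10'
+    assert extensions[0].has_parameter('no_context_takeover')
+    # Serializing again produces a value in the same grammar.
+    return format_extensions(extensions)
+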
+
+# vi:sts=4 sw=4 et
diff --git a/module/lib/mod_pywebsocket/dispatch.py b/module/lib/mod_pywebsocket/dispatch.py
new file mode 100644
index 000000000..ab1eb4fb3
--- /dev/null
+++ b/module/lib/mod_pywebsocket/dispatch.py
@@ -0,0 +1,381 @@
+# Copyright 2012, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+"""Dispatch WebSocket request.
+"""
+
+
+import logging
+import os
+import re
+
+from mod_pywebsocket import common
+from mod_pywebsocket import handshake
+from mod_pywebsocket import msgutil
+from mod_pywebsocket import stream
+from mod_pywebsocket import util
+
+
+_SOURCE_PATH_PATTERN = re.compile(r'(?i)_wsh\.py$')
+_SOURCE_SUFFIX = '_wsh.py'
+_DO_EXTRA_HANDSHAKE_HANDLER_NAME = 'web_socket_do_extra_handshake'
+_TRANSFER_DATA_HANDLER_NAME = 'web_socket_transfer_data'
+_PASSIVE_CLOSING_HANDSHAKE_HANDLER_NAME = (
+ 'web_socket_passive_closing_handshake')
+
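+# A handler definition file (for example echo_wsh.py; the name and body are
+# illustrative) is a plain Python source file defining the functions named
+# above, roughly:
+#
+#     def web_socket_do_extra_handshake(request):
+#         pass  # inspect or modify the handshake here, or raise to reject it
+#
+#     def web_socket_transfer_data(request):
+#         while True:
+#             message = request.ws_stream.receive_message()
+#             if message is None:
+#                 return
+#             request.ws_stream.send_message(message, binary=False)
+#
+# web_socket_passive_closing_handshake is optional; a default implementation
+# is used when it is not defined.
+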
+
+class DispatchException(Exception):
+ """Exception in dispatching WebSocket request."""
+
+ def __init__(self, name, status=common.HTTP_STATUS_NOT_FOUND):
+ super(DispatchException, self).__init__(name)
+ self.status = status
+
+
+def _default_passive_closing_handshake_handler(request):
+ """Default web_socket_passive_closing_handshake handler."""
+
+ return common.STATUS_NORMAL_CLOSURE, ''
+
+
+def _normalize_path(path):
+ """Normalize path.
+
+ Args:
+ path: the path to normalize.
+
+ Path is converted to the absolute path.
+ The input path can use either '\\' or '/' as the separator.
+ The normalized path always uses '/' regardless of the platform.
+ """
+
+ path = path.replace('\\', os.path.sep)
+ path = os.path.realpath(path)
+ path = path.replace('\\', '/')
+ return path
+
+
+def _create_path_to_resource_converter(base_dir):
+ """Returns a function that converts the path of a WebSocket handler source
+ file to a resource string by removing the path to the base directory from
+ its head, removing _SOURCE_SUFFIX from its tail, and replacing path
+ separators in it with '/'.
+
+ Args:
+ base_dir: the path to the base directory.
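+
+    For example (paths are illustrative), with base_dir '/srv/handlers' the
+    returned converter maps '/srv/handlers/chat/echo_wsh.py' to the resource
+    '/chat/echo', and returns None for paths outside base_dir or paths not
+    ending with _SOURCE_SUFFIX.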
+ """
+
+ base_dir = _normalize_path(base_dir)
+
+ base_len = len(base_dir)
+ suffix_len = len(_SOURCE_SUFFIX)
+
+ def converter(path):
+ if not path.endswith(_SOURCE_SUFFIX):
+ return None
+        # _normalize_path must not be used here because resolving symlinks
+        # would break the following path check.
+ path = path.replace('\\', '/')
+ if not path.startswith(base_dir):
+ return None
+ return path[base_len:-suffix_len]
+
+ return converter
+
+
+def _enumerate_handler_file_paths(directory):
+ """Returns a generator that enumerates WebSocket Handler source file names
+ in the given directory.
+ """
+
+ for root, unused_dirs, files in os.walk(directory):
+ for base in files:
+ path = os.path.join(root, base)
+ if _SOURCE_PATH_PATTERN.search(path):
+ yield path
+
+
+class _HandlerSuite(object):
+ """A handler suite holder class."""
+
+ def __init__(self, do_extra_handshake, transfer_data,
+ passive_closing_handshake):
+ self.do_extra_handshake = do_extra_handshake
+ self.transfer_data = transfer_data
+ self.passive_closing_handshake = passive_closing_handshake
+
+
+def _source_handler_file(handler_definition):
+ """Source a handler definition string.
+
+ Args:
+ handler_definition: a string containing Python statements that define
+ handler functions.
+ """
+
+ global_dic = {}
+ try:
+ exec handler_definition in global_dic
+ except Exception:
+ raise DispatchException('Error in sourcing handler:' +
+ util.get_stack_trace())
+ passive_closing_handshake_handler = None
+ try:
+ passive_closing_handshake_handler = _extract_handler(
+ global_dic, _PASSIVE_CLOSING_HANDSHAKE_HANDLER_NAME)
+ except Exception:
+ passive_closing_handshake_handler = (
+ _default_passive_closing_handshake_handler)
+ return _HandlerSuite(
+ _extract_handler(global_dic, _DO_EXTRA_HANDSHAKE_HANDLER_NAME),
+ _extract_handler(global_dic, _TRANSFER_DATA_HANDLER_NAME),
+ passive_closing_handshake_handler)
+
+
+def _extract_handler(dic, name):
+ """Extracts a callable with the specified name from the given dictionary
+ dic.
+ """
+
+ if name not in dic:
+ raise DispatchException('%s is not defined.' % name)
+ handler = dic[name]
+ if not callable(handler):
+ raise DispatchException('%s is not callable.' % name)
+ return handler
+
+
+class Dispatcher(object):
+ """Dispatches WebSocket requests.
+
+ This class maintains a map from resource name to handlers.
+ """
+
+ def __init__(
+ self, root_dir, scan_dir=None,
+ allow_handlers_outside_root_dir=True):
+ """Construct an instance.
+
+ Args:
+ root_dir: The directory where handler definition files are
+ placed.
+ scan_dir: The directory where handler definition files are
+ searched. scan_dir must be a directory under root_dir,
+ including root_dir itself. If scan_dir is None,
+ root_dir is used as scan_dir. scan_dir can be useful
+ in saving scan time when root_dir contains many
+ subdirectories.
+ allow_handlers_outside_root_dir: Scans handler files even if their
+ canonical path is not under root_dir.
+ """
+
+ self._logger = util.get_class_logger(self)
+
+ self._handler_suite_map = {}
+ self._source_warnings = []
+ if scan_dir is None:
+ scan_dir = root_dir
+ if not os.path.realpath(scan_dir).startswith(
+ os.path.realpath(root_dir)):
+ raise DispatchException('scan_dir:%s must be a directory under '
+ 'root_dir:%s.' % (scan_dir, root_dir))
+ self._source_handler_files_in_dir(
+ root_dir, scan_dir, allow_handlers_outside_root_dir)
+
+ def add_resource_path_alias(self,
+ alias_resource_path, existing_resource_path):
+ """Add resource path alias.
+
+        Once added, requests to alias_resource_path will be handled by the
+        handler registered for existing_resource_path.
+
+ Args:
+ alias_resource_path: alias resource path
+ existing_resource_path: existing resource path
+ """
+ try:
+ handler_suite = self._handler_suite_map[existing_resource_path]
+ self._handler_suite_map[alias_resource_path] = handler_suite
+ except KeyError:
+ raise DispatchException('No handler for: %r' %
+ existing_resource_path)
+
+ def source_warnings(self):
+ """Return warnings in sourcing handlers."""
+
+ return self._source_warnings
+
+ def do_extra_handshake(self, request):
+ """Do extra checking in WebSocket handshake.
+
+ Select a handler based on request.uri and call its
+ web_socket_do_extra_handshake function.
+
+ Args:
+ request: mod_python request.
+
+ Raises:
+            DispatchException: when no handler is found
+            AbortedByUserException: when the user handler aborts the connection
+            HandshakeException: when the opening handshake fails
+ """
+
+ handler_suite = self.get_handler_suite(request.ws_resource)
+ if handler_suite is None:
+ raise DispatchException('No handler for: %r' % request.ws_resource)
+ do_extra_handshake_ = handler_suite.do_extra_handshake
+ try:
+ do_extra_handshake_(request)
+ except handshake.AbortedByUserException, e:
+ raise
+ except Exception, e:
+ util.prepend_message_to_exception(
+ '%s raised exception for %s: ' % (
+ _DO_EXTRA_HANDSHAKE_HANDLER_NAME,
+ request.ws_resource),
+ e)
+ raise handshake.HandshakeException(e, common.HTTP_STATUS_FORBIDDEN)
+
+ def transfer_data(self, request):
+ """Let a handler transfer_data with a WebSocket client.
+
+ Select a handler based on request.ws_resource and call its
+ web_socket_transfer_data function.
+
+ Args:
+ request: mod_python request.
+
+ Raises:
+            DispatchException: when no handler is found
+            AbortedByUserException: when the user handler aborts the connection
+ """
+
+ handler_suite = self.get_handler_suite(request.ws_resource)
+ if handler_suite is None:
+ raise DispatchException('No handler for: %r' % request.ws_resource)
+ transfer_data_ = handler_suite.transfer_data
+ # TODO(tyoshino): Terminate underlying TCP connection if possible.
+ try:
+ transfer_data_(request)
+ if not request.server_terminated:
+ request.ws_stream.close_connection()
+ # Catch non-critical exceptions the handler didn't handle.
+ except handshake.AbortedByUserException, e:
+ self._logger.debug('%s', e)
+ raise
+ except msgutil.BadOperationException, e:
+ self._logger.debug('%s', e)
+ request.ws_stream.close_connection(common.STATUS_ABNORMAL_CLOSURE)
+ except msgutil.InvalidFrameException, e:
+            # InvalidFrameException must be caught before
+            # ConnectionTerminatedException, which is its base class and would
+            # otherwise catch it.
+ self._logger.debug('%s', e)
+ request.ws_stream.close_connection(common.STATUS_PROTOCOL_ERROR)
+ except msgutil.UnsupportedFrameException, e:
+ self._logger.debug('%s', e)
+ request.ws_stream.close_connection(common.STATUS_UNSUPPORTED_DATA)
+ except stream.InvalidUTF8Exception, e:
+ self._logger.debug('%s', e)
+ request.ws_stream.close_connection(
+ common.STATUS_INVALID_FRAME_PAYLOAD_DATA)
+ except msgutil.ConnectionTerminatedException, e:
+ self._logger.debug('%s', e)
+ except Exception, e:
+ util.prepend_message_to_exception(
+ '%s raised exception for %s: ' % (
+ _TRANSFER_DATA_HANDLER_NAME, request.ws_resource),
+ e)
+ raise
+
+ def passive_closing_handshake(self, request):
+ """Prepare code and reason for responding client initiated closing
+ handshake.
+ """
+
+ handler_suite = self.get_handler_suite(request.ws_resource)
+ if handler_suite is None:
+ return _default_passive_closing_handshake_handler(request)
+ return handler_suite.passive_closing_handshake(request)
+
+ def get_handler_suite(self, resource):
+ """Retrieves two handlers (one for extra handshake processing, and one
+ for data transfer) for the given request as a HandlerSuite object.
+ """
+
+ fragment = None
+ if '#' in resource:
+ resource, fragment = resource.split('#', 1)
+ if '?' in resource:
+ resource = resource.split('?', 1)[0]
+ handler_suite = self._handler_suite_map.get(resource)
+ if handler_suite and fragment:
+ raise DispatchException('Fragment identifiers MUST NOT be used on '
+ 'WebSocket URIs',
+ common.HTTP_STATUS_BAD_REQUEST)
+ return handler_suite
+
+ def _source_handler_files_in_dir(
+ self, root_dir, scan_dir, allow_handlers_outside_root_dir):
+ """Source all the handler source files in the scan_dir directory.
+
+ The resource path is determined relative to root_dir.
+ """
+
+        # We build a map from resource to handler code assuming that there is
+        # only one path from root_dir to scan_dir and that it can be obtained
+        # by comparing their realpaths.
+
+ # Here we cannot use abspath. See
+ # https://bugs.webkit.org/show_bug.cgi?id=31603
+
+ convert = _create_path_to_resource_converter(root_dir)
+ scan_realpath = os.path.realpath(scan_dir)
+ root_realpath = os.path.realpath(root_dir)
+ for path in _enumerate_handler_file_paths(scan_realpath):
+ if (not allow_handlers_outside_root_dir and
+ (not os.path.realpath(path).startswith(root_realpath))):
+ self._logger.debug(
+ 'Canonical path of %s is not under root directory' %
+ path)
+ continue
+ try:
+ handler_suite = _source_handler_file(open(path).read())
+ except DispatchException, e:
+ self._source_warnings.append('%s: %s' % (path, e))
+ continue
+ resource = convert(path)
+ if resource is None:
+ self._logger.debug(
+ 'Path to resource conversion on %s failed' % path)
+ else:
+                self._handler_suite_map[resource] = handler_suite
+
+
+# vi:sts=4 sw=4 et
diff --git a/module/lib/mod_pywebsocket/extensions.py b/module/lib/mod_pywebsocket/extensions.py
new file mode 100644
index 000000000..52b7a4a19
--- /dev/null
+++ b/module/lib/mod_pywebsocket/extensions.py
@@ -0,0 +1,356 @@
+# Copyright 2012, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+from mod_pywebsocket import common
+from mod_pywebsocket import util
+from mod_pywebsocket.http_header_util import quote_if_necessary
+
+
+_available_processors = {}
+
+
+class ExtensionProcessorInterface(object):
+
+ def get_extension_response(self):
+ return None
+
+ def setup_stream_options(self, stream_options):
+ pass
+
+
+class DeflateStreamExtensionProcessor(ExtensionProcessorInterface):
+ """WebSocket DEFLATE stream extension processor."""
+
+ def __init__(self, request):
+ self._logger = util.get_class_logger(self)
+
+ self._request = request
+
+ def get_extension_response(self):
+ if len(self._request.get_parameter_names()) != 0:
+ return None
+
+ self._logger.debug(
+ 'Enable %s extension', common.DEFLATE_STREAM_EXTENSION)
+
+ return common.ExtensionParameter(common.DEFLATE_STREAM_EXTENSION)
+
+ def setup_stream_options(self, stream_options):
+ stream_options.deflate_stream = True
+
+
+_available_processors[common.DEFLATE_STREAM_EXTENSION] = (
+ DeflateStreamExtensionProcessor)
+
+
+class DeflateFrameExtensionProcessor(ExtensionProcessorInterface):
+ """WebSocket Per-frame DEFLATE extension processor."""
+
+ _WINDOW_BITS_PARAM = 'max_window_bits'
+ _NO_CONTEXT_TAKEOVER_PARAM = 'no_context_takeover'
+
+ def __init__(self, request):
+ self._logger = util.get_class_logger(self)
+
+ self._request = request
+
+ self._response_window_bits = None
+ self._response_no_context_takeover = False
+
+ # Counters for statistics.
+
+ # Total number of outgoing bytes supplied to this filter.
+ self._total_outgoing_payload_bytes = 0
+ # Total number of bytes sent to the network after applying this filter.
+ self._total_filtered_outgoing_payload_bytes = 0
+
+ # Total number of bytes received from the network.
+ self._total_incoming_payload_bytes = 0
+ # Total number of incoming bytes obtained after applying this filter.
+ self._total_filtered_incoming_payload_bytes = 0
+
+ def get_extension_response(self):
+ # Any unknown parameter will be just ignored.
+
+ window_bits = self._request.get_parameter_value(
+ self._WINDOW_BITS_PARAM)
+ no_context_takeover = self._request.has_parameter(
+ self._NO_CONTEXT_TAKEOVER_PARAM)
+ if (no_context_takeover and
+ self._request.get_parameter_value(
+ self._NO_CONTEXT_TAKEOVER_PARAM) is not None):
+ return None
+
+ if window_bits is not None:
+ try:
+ window_bits = int(window_bits)
+ except ValueError, e:
+ return None
+ if window_bits < 8 or window_bits > 15:
+ return None
+
+ self._deflater = util._RFC1979Deflater(
+ window_bits, no_context_takeover)
+
+ self._inflater = util._RFC1979Inflater()
+
+ self._compress_outgoing = True
+
+ response = common.ExtensionParameter(self._request.name())
+
+ if self._response_window_bits is not None:
+ response.add_parameter(
+ self._WINDOW_BITS_PARAM, str(self._response_window_bits))
+ if self._response_no_context_takeover:
+ response.add_parameter(
+ self._NO_CONTEXT_TAKEOVER_PARAM, None)
+
+ self._logger.debug(
+ 'Enable %s extension ('
+ 'request: window_bits=%s; no_context_takeover=%r, '
+            'response: window_bits=%s; no_context_takeover=%r)' %
+ (self._request.name(),
+ window_bits,
+ no_context_takeover,
+ self._response_window_bits,
+ self._response_no_context_takeover))
+
+ return response
+
+ def setup_stream_options(self, stream_options):
+
+ class _OutgoingFilter(object):
+
+ def __init__(self, parent):
+ self._parent = parent
+
+ def filter(self, frame):
+ self._parent._outgoing_filter(frame)
+
+ class _IncomingFilter(object):
+
+ def __init__(self, parent):
+ self._parent = parent
+
+ def filter(self, frame):
+ self._parent._incoming_filter(frame)
+
+ stream_options.outgoing_frame_filters.append(
+ _OutgoingFilter(self))
+ stream_options.incoming_frame_filters.insert(
+ 0, _IncomingFilter(self))
+
+ def set_response_window_bits(self, value):
+ self._response_window_bits = value
+
+ def set_response_no_context_takeover(self, value):
+ self._response_no_context_takeover = value
+
+ def enable_outgoing_compression(self):
+ self._compress_outgoing = True
+
+ def disable_outgoing_compression(self):
+ self._compress_outgoing = False
+
+ def _outgoing_filter(self, frame):
+ """Transform outgoing frames. This method is called only by
+ an _OutgoingFilter instance.
+ """
+
+ original_payload_size = len(frame.payload)
+ self._total_outgoing_payload_bytes += original_payload_size
+
+ if (not self._compress_outgoing or
+ common.is_control_opcode(frame.opcode)):
+ self._total_filtered_outgoing_payload_bytes += (
+ original_payload_size)
+ return
+
+ frame.payload = self._deflater.filter(frame.payload)
+ frame.rsv1 = 1
+
+ filtered_payload_size = len(frame.payload)
+ self._total_filtered_outgoing_payload_bytes += filtered_payload_size
+
+ # Print inf when ratio is not available.
+ ratio = float('inf')
+ average_ratio = float('inf')
+ if original_payload_size != 0:
+ ratio = float(filtered_payload_size) / original_payload_size
+ if self._total_outgoing_payload_bytes != 0:
+ average_ratio = (
+ float(self._total_filtered_outgoing_payload_bytes) /
+ self._total_outgoing_payload_bytes)
+ self._logger.debug(
+ 'Outgoing compress ratio: %f (average: %f)' %
+ (ratio, average_ratio))
+
+ def _incoming_filter(self, frame):
+ """Transform incoming frames. This method is called only by
+ an _IncomingFilter instance.
+ """
+
+ received_payload_size = len(frame.payload)
+ self._total_incoming_payload_bytes += received_payload_size
+
+ if frame.rsv1 != 1 or common.is_control_opcode(frame.opcode):
+ self._total_filtered_incoming_payload_bytes += (
+ received_payload_size)
+ return
+
+ frame.payload = self._inflater.filter(frame.payload)
+ frame.rsv1 = 0
+
+ filtered_payload_size = len(frame.payload)
+ self._total_filtered_incoming_payload_bytes += filtered_payload_size
+
+ # Print inf when ratio is not available.
+ ratio = float('inf')
+ average_ratio = float('inf')
+        if filtered_payload_size != 0:
+ ratio = float(received_payload_size) / filtered_payload_size
+ if self._total_filtered_incoming_payload_bytes != 0:
+ average_ratio = (
+ float(self._total_incoming_payload_bytes) /
+ self._total_filtered_incoming_payload_bytes)
+ self._logger.debug(
+ 'Incoming compress ratio: %f (average: %f)' %
+ (ratio, average_ratio))
+
+
+_available_processors[common.DEFLATE_FRAME_EXTENSION] = (
+ DeflateFrameExtensionProcessor)
+
+
+# Adding vendor-prefixed deflate-frame extension.
+# TODO(bashi): Remove this after WebKit stops using vender prefix.
+_available_processors[common.X_WEBKIT_DEFLATE_FRAME_EXTENSION] = (
+ DeflateFrameExtensionProcessor)
+
+
+def _parse_compression_method(data):
+ """Parses the value of "method" extension parameter."""
+
+ return common.parse_extensions(data, allow_quoted_string=True)
+
+
+def _create_accepted_method_desc(method_name, method_params):
+ """Creates accepted-method-desc from given method name and parameters"""
+
+ extension = common.ExtensionParameter(method_name)
+ for name, value in method_params:
+ extension.add_parameter(name, value)
+ return common.format_extension(extension)
+
+
+class PerFrameCompressionExtensionProcessor(ExtensionProcessorInterface):
+ """WebSocket Per-frame compression extension processor."""
+
+ _METHOD_PARAM = 'method'
+ _DEFLATE_METHOD = 'deflate'
+
+ def __init__(self, request):
+ self._logger = util.get_class_logger(self)
+ self._request = request
+ self._compression_method_name = None
+ self._compression_processor = None
+
+ def _lookup_compression_processor(self, method_desc):
+ if method_desc.name() == self._DEFLATE_METHOD:
+ return DeflateFrameExtensionProcessor(method_desc)
+ return None
+
+ def _get_compression_processor_response(self):
+ """Looks up the compression processor based on the self._request and
+ returns the compression processor's response.
+ """
+
+ method_list = self._request.get_parameter_value(self._METHOD_PARAM)
+ if method_list is None:
+ return None
+ methods = _parse_compression_method(method_list)
+ if methods is None:
+ return None
+        compression_processor = None
+ # The current implementation tries only the first method that matches
+ # supported algorithm. Following methods aren't tried even if the
+ # first one is rejected.
+ # TODO(bashi): Need to clarify this behavior.
+ for method_desc in methods:
+ compression_processor = self._lookup_compression_processor(
+ method_desc)
+ if compression_processor is not None:
+ self._compression_method_name = method_desc.name()
+ break
+ if compression_processor is None:
+ return None
+ processor_response = compression_processor.get_extension_response()
+ if processor_response is None:
+ return None
+ self._compression_processor = compression_processor
+ return processor_response
+
+ def get_extension_response(self):
+ processor_response = self._get_compression_processor_response()
+ if processor_response is None:
+ return None
+
+ response = common.ExtensionParameter(self._request.name())
+ accepted_method_desc = _create_accepted_method_desc(
+ self._compression_method_name,
+ processor_response.get_parameters())
+ response.add_parameter(self._METHOD_PARAM, accepted_method_desc)
+ self._logger.debug(
+ 'Enable %s extension (method: %s)' %
+ (self._request.name(), self._compression_method_name))
+ return response
+
+ def setup_stream_options(self, stream_options):
+ if self._compression_processor is None:
+ return
+ self._compression_processor.setup_stream_options(stream_options)
+
+ def get_compression_processor(self):
+ return self._compression_processor
+
+
+_available_processors[common.PERFRAME_COMPRESSION_EXTENSION] = (
+ PerFrameCompressionExtensionProcessor)
+
+
+def get_extension_processor(extension_request):
+ global _available_processors
+ processor_class = _available_processors.get(extension_request.name())
+ if processor_class is None:
+ return None
+ return processor_class(extension_request)
+
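+
+# Illustrative sketch (not used by the library): negotiating the deflate-frame
+# extension for a hypothetical offer of 'deflate-frame; max_window_bits=10'.
+def _example_negotiate_deflate_frame():
+    offer = common.ExtensionParameter(common.DEFLATE_FRAME_EXTENSION)
+    offer.add_parameter('max_window_bits', '10')
+    processor = get_extension_processor(offer)
+    # For deflate-frame this is a DeflateFrameExtensionProcessor; the
+    # response is an ExtensionParameter to echo back in
+    # Sec-WebSocket-Extensions, or None if the offer is rejected.
+    return processor.get_extension_response()
+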
+
+# vi:sts=4 sw=4 et
diff --git a/module/lib/mod_pywebsocket/handshake/__init__.py b/module/lib/mod_pywebsocket/handshake/__init__.py
new file mode 100644
index 000000000..10a178314
--- /dev/null
+++ b/module/lib/mod_pywebsocket/handshake/__init__.py
@@ -0,0 +1,116 @@
+# Copyright 2011, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+"""WebSocket opening handshake processor. This class try to apply available
+opening handshake processors for each protocol version until a connection is
+successfully established.
+"""
+
+
+import logging
+
+from mod_pywebsocket import common
+from mod_pywebsocket.handshake import draft75
+from mod_pywebsocket.handshake import hybi00
+from mod_pywebsocket.handshake import hybi
+# Export AbortedByUserException, HandshakeException, and VersionException
+# symbol from this module.
+from mod_pywebsocket.handshake._base import AbortedByUserException
+from mod_pywebsocket.handshake._base import HandshakeException
+from mod_pywebsocket.handshake._base import VersionException
+
+
+_LOGGER = logging.getLogger(__name__)
+
+
+def do_handshake(request, dispatcher, allowDraft75=False, strict=False):
+ """Performs WebSocket handshake.
+
+ Args:
+ request: mod_python request.
+ dispatcher: Dispatcher (dispatch.Dispatcher).
+ allowDraft75: allow draft 75 handshake protocol.
+ strict: Strictly check handshake request in draft 75.
+ Default: False. If True, request.connection must provide
+ get_memorized_lines method.
+
+ Handshaker will add attributes such as ws_resource in performing
+ handshake.
+ """
+
+ _LOGGER.debug('Client\'s opening handshake resource: %r', request.uri)
+    # To print mimetools.Message as an escaped one-line string, we convert
+    # headers_in to a dict object. Without the conversion, %r just prints the
+    # type and address, and %s prints the original header string as multiple
+    # lines.
+    #
+    # Both mimetools.Message and MpTable_Type of mod_python can be
+    # converted to dict.
+    #
+    # mimetools.Message.__str__ returns the original header string.
+    # dict(mimetools.Message object) returns the map from header names to
+    # header values. MpTable_Type has no such __str__, only a __repr__ that
+    # formats it like a dictionary object.
+ _LOGGER.debug(
+ 'Client\'s opening handshake headers: %r', dict(request.headers_in))
+
+ handshakers = []
+ handshakers.append(
+ ('RFC 6455', hybi.Handshaker(request, dispatcher)))
+ handshakers.append(
+ ('HyBi 00', hybi00.Handshaker(request, dispatcher)))
+ if allowDraft75:
+ handshakers.append(
+ ('Hixie 75', draft75.Handshaker(request, dispatcher, strict)))
+
+ for name, handshaker in handshakers:
+ _LOGGER.debug('Trying protocol version %s', name)
+ try:
+ handshaker.do_handshake()
+ _LOGGER.info('Established (%s protocol)', name)
+ return
+ except HandshakeException, e:
+ _LOGGER.debug(
+ 'Failed to complete opening handshake as %s protocol: %r',
+ name, e)
+ if e.status:
+ raise e
+ except AbortedByUserException, e:
+ raise
+ except VersionException, e:
+ raise
+
+ # TODO(toyoshim): Add a test to cover the case all handshakers fail.
+ raise HandshakeException(
+ 'Failed to complete opening handshake for all available protocols',
+ status=common.HTTP_STATUS_BAD_REQUEST)
+
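+# Typical use by a server (a sketch; 'request' stands for the mod_python
+# request object supplied by the framework, and the handler directory is
+# illustrative):
+#
+#     from mod_pywebsocket import dispatch, handshake
+#
+#     dispatcher = dispatch.Dispatcher('/path/to/handlers')
+#     handshake.do_handshake(request, dispatcher)
+#     dispatcher.transfer_data(request)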
+
+# vi:sts=4 sw=4 et
diff --git a/module/lib/mod_pywebsocket/handshake/_base.py b/module/lib/mod_pywebsocket/handshake/_base.py
new file mode 100644
index 000000000..bc095b129
--- /dev/null
+++ b/module/lib/mod_pywebsocket/handshake/_base.py
@@ -0,0 +1,219 @@
+# Copyright 2012, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+"""Common functions and exceptions used by WebSocket opening handshake
+processors.
+"""
+
+
+from mod_pywebsocket import common
+from mod_pywebsocket import http_header_util
+
+
+class AbortedByUserException(Exception):
+ """Exception for aborting a connection intentionally.
+
+ If this exception is raised in do_extra_handshake handler, the connection
+ will be abandoned. No other WebSocket or HTTP(S) handler will be invoked.
+
+ If this exception is raised in transfer_data_handler, the connection will
+ be closed without closing handshake. No other WebSocket or HTTP(S) handler
+ will be invoked.
+ """
+
+ pass
+
+
+class HandshakeException(Exception):
+ """This exception will be raised when an error occurred while processing
+ WebSocket initial handshake.
+ """
+
+ def __init__(self, name, status=None):
+ super(HandshakeException, self).__init__(name)
+ self.status = status
+
+
+class VersionException(Exception):
+ """This exception will be raised when a version of client request does not
+ match with version the server supports.
+ """
+
+ def __init__(self, name, supported_versions=''):
+ """Construct an instance.
+
+ Args:
+            supported_versions: a str listing the supported hybi versions,
+                e.g. '8, 13'.
+ """
+ super(VersionException, self).__init__(name)
+ self.supported_versions = supported_versions
+
+
+def get_default_port(is_secure):
+ if is_secure:
+ return common.DEFAULT_WEB_SOCKET_SECURE_PORT
+ else:
+ return common.DEFAULT_WEB_SOCKET_PORT
+
+
+def validate_subprotocol(subprotocol, hixie):
+ """Validate a value in subprotocol fields such as WebSocket-Protocol,
+ Sec-WebSocket-Protocol.
+
+ See
+ - RFC 6455: Section 4.1., 4.2.2., and 4.3.
+ - HyBi 00: Section 4.1. Opening handshake
+ - Hixie 75: Section 4.1. Handshake
+ """
+
+ if not subprotocol:
+ raise HandshakeException('Invalid subprotocol name: empty')
+ if hixie:
+ # Parameter should be in the range U+0020 to U+007E.
+ for c in subprotocol:
+ if not 0x20 <= ord(c) <= 0x7e:
+ raise HandshakeException(
+ 'Illegal character in subprotocol name: %r' % c)
+ else:
+ # Parameter should be encoded HTTP token.
+ state = http_header_util.ParsingState(subprotocol)
+ token = http_header_util.consume_token(state)
+ rest = http_header_util.peek(state)
+ # If |rest| is not None, |subprotocol| is not one token or invalid. If
+ # |rest| is None, |token| must not be None because |subprotocol| is
+ # concatenation of |token| and |rest| and is not None.
+ if rest is not None:
+ raise HandshakeException('Invalid non-token string in subprotocol '
+ 'name: %r' % rest)
+
+
+def parse_host_header(request):
+ fields = request.headers_in['Host'].split(':', 1)
+ if len(fields) == 1:
+ return fields[0], get_default_port(request.is_https())
+ try:
+ return fields[0], int(fields[1])
+ except ValueError, e:
+ raise HandshakeException('Invalid port number format: %r' % e)
+
+
+def format_header(name, value):
+ return '%s: %s\r\n' % (name, value)
+
+
+def build_location(request):
+ """Build WebSocket location for request."""
+ location_parts = []
+ if request.is_https():
+ location_parts.append(common.WEB_SOCKET_SECURE_SCHEME)
+ else:
+ location_parts.append(common.WEB_SOCKET_SCHEME)
+ location_parts.append('://')
+ host, port = parse_host_header(request)
+ connection_port = request.connection.local_addr[1]
+ if port != connection_port:
+ raise HandshakeException('Header/connection port mismatch: %d/%d' %
+ (port, connection_port))
+ location_parts.append(host)
+ if (port != get_default_port(request.is_https())):
+ location_parts.append(':')
+ location_parts.append(str(port))
+ location_parts.append(request.uri)
+ return ''.join(location_parts)
+
+
+def get_mandatory_header(request, key):
+ value = request.headers_in.get(key)
+ if value is None:
+ raise HandshakeException('Header %s is not defined' % key)
+ return value
+
+
+def validate_mandatory_header(request, key, expected_value, fail_status=None):
+ value = get_mandatory_header(request, key)
+
+ if value.lower() != expected_value.lower():
+ raise HandshakeException(
+ 'Expected %r for header %s but found %r (case-insensitive)' %
+ (expected_value, key, value), status=fail_status)
+
+
+def check_request_line(request):
+ # 5.1 1. The three character UTF-8 string "GET".
+ # 5.1 2. A UTF-8-encoded U+0020 SPACE character (0x20 byte).
+ if request.method != 'GET':
+ raise HandshakeException('Method is not GET')
+
+
+def check_header_lines(request, mandatory_headers):
+ check_request_line(request)
+
+ # The expected field names, and the meaning of their corresponding
+ # values, are as follows.
+ # |Upgrade| and |Connection|
+ for key, expected_value in mandatory_headers:
+ validate_mandatory_header(request, key, expected_value)
+
+
+def parse_token_list(data):
+ """Parses a header value which follows 1#token and returns parsed elements
+ as a list of strings.
+
+ Leading LWSes must be trimmed.
+ """
+
+ state = http_header_util.ParsingState(data)
+
+ token_list = []
+
+ while True:
+ token = http_header_util.consume_token(state)
+ if token is not None:
+ token_list.append(token)
+
+ http_header_util.consume_lwses(state)
+
+ if http_header_util.peek(state) is None:
+ break
+
+ if not http_header_util.consume_string(state, ','):
+ raise HandshakeException(
+ 'Expected a comma but found %r' % http_header_util.peek(state))
+
+ http_header_util.consume_lwses(state)
+
+ if len(token_list) == 0:
+ raise HandshakeException('No valid token found')
+
+ return token_list
+
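+
+# Illustrative sketch (not used by the library): parsing a 1#token header
+# value such as a Connection header.
+def _example_parse_connection_tokens():
+    tokens = parse_token_list('keep-alive, Upgrade')
+    assert tokens == ['keep-alive', 'Upgrade']
+    return tokens
+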
+
+# vi:sts=4 sw=4 et
diff --git a/module/lib/mod_pywebsocket/handshake/draft75.py b/module/lib/mod_pywebsocket/handshake/draft75.py
new file mode 100644
index 000000000..802a31c9a
--- /dev/null
+++ b/module/lib/mod_pywebsocket/handshake/draft75.py
@@ -0,0 +1,190 @@
+# Copyright 2011, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+"""WebSocket handshaking defined in draft-hixie-thewebsocketprotocol-75."""
+
+
+# Note: request.connection.write is used in this module, even though the
+# mod_python documentation says that it should be used only in connection
+# handlers. Unfortunately, we have no other option. For example, request.write
+# is not suitable because it doesn't allow writing raw bytes directly.
+
+
+import logging
+import re
+
+from mod_pywebsocket import common
+from mod_pywebsocket.stream import StreamHixie75
+from mod_pywebsocket import util
+from mod_pywebsocket.handshake._base import HandshakeException
+from mod_pywebsocket.handshake._base import build_location
+from mod_pywebsocket.handshake._base import validate_subprotocol
+
+
+_MANDATORY_HEADERS = [
+ # key, expected value or None
+ ['Upgrade', 'WebSocket'],
+ ['Connection', 'Upgrade'],
+ ['Host', None],
+ ['Origin', None],
+]
+
+_FIRST_FIVE_LINES = map(re.compile, [
+ r'^GET /[\S]* HTTP/1.1\r\n$',
+ r'^Upgrade: WebSocket\r\n$',
+ r'^Connection: Upgrade\r\n$',
+ r'^Host: [\S]+\r\n$',
+ r'^Origin: [\S]+\r\n$',
+])
+
+_SIXTH_AND_LATER = re.compile(
+ r'^'
+ r'(WebSocket-Protocol: [\x20-\x7e]+\r\n)?'
+ r'(Cookie: [^\r]*\r\n)*'
+ r'(Cookie2: [^\r]*\r\n)?'
+ r'(Cookie: [^\r]*\r\n)*'
+ r'\r\n')
+
+
+class Handshaker(object):
+ """This class performs WebSocket handshake."""
+
+ def __init__(self, request, dispatcher, strict=False):
+ """Construct an instance.
+
+ Args:
+ request: mod_python request.
+ dispatcher: Dispatcher (dispatch.Dispatcher).
+ strict: Strictly check handshake request. Default: False.
+ If True, request.connection must provide get_memorized_lines
+ method.
+
+ Handshaker will add attributes such as ws_resource in performing
+ handshake.
+ """
+
+ self._logger = util.get_class_logger(self)
+
+ self._request = request
+ self._dispatcher = dispatcher
+ self._strict = strict
+
+ def do_handshake(self):
+ """Perform WebSocket Handshake.
+
+ On _request, we set
+ ws_resource, ws_origin, ws_location, ws_protocol
+ ws_challenge_md5: WebSocket handshake information.
+ ws_stream: Frame generation/parsing class.
+ ws_version: Protocol version.
+ """
+
+ self._check_header_lines()
+ self._set_resource()
+ self._set_origin()
+ self._set_location()
+ self._set_subprotocol()
+ self._set_protocol_version()
+
+ self._dispatcher.do_extra_handshake(self._request)
+
+ self._send_handshake()
+
+ self._logger.debug('Sent opening handshake response')
+
+ def _set_resource(self):
+ self._request.ws_resource = self._request.uri
+
+ def _set_origin(self):
+ self._request.ws_origin = self._request.headers_in['Origin']
+
+ def _set_location(self):
+ self._request.ws_location = build_location(self._request)
+
+ def _set_subprotocol(self):
+ subprotocol = self._request.headers_in.get('WebSocket-Protocol')
+ if subprotocol is not None:
+ validate_subprotocol(subprotocol, hixie=True)
+ self._request.ws_protocol = subprotocol
+
+ def _set_protocol_version(self):
+ self._logger.debug('IETF Hixie 75 protocol')
+ self._request.ws_version = common.VERSION_HIXIE75
+ self._request.ws_stream = StreamHixie75(self._request)
+
+ def _sendall(self, data):
+ self._request.connection.write(data)
+
+ def _send_handshake(self):
+ self._sendall('HTTP/1.1 101 Web Socket Protocol Handshake\r\n')
+ self._sendall('Upgrade: WebSocket\r\n')
+ self._sendall('Connection: Upgrade\r\n')
+ self._sendall('WebSocket-Origin: %s\r\n' % self._request.ws_origin)
+ self._sendall('WebSocket-Location: %s\r\n' % self._request.ws_location)
+ if self._request.ws_protocol:
+ self._sendall(
+ 'WebSocket-Protocol: %s\r\n' % self._request.ws_protocol)
+ self._sendall('\r\n')
+
+ def _check_header_lines(self):
+ for key, expected_value in _MANDATORY_HEADERS:
+ actual_value = self._request.headers_in.get(key)
+ if not actual_value:
+ raise HandshakeException('Header %s is not defined' % key)
+ if expected_value:
+ if actual_value != expected_value:
+ raise HandshakeException(
+ 'Expected %r for header %s but found %r' %
+ (expected_value, key, actual_value))
+ if self._strict:
+ try:
+ lines = self._request.connection.get_memorized_lines()
+ except AttributeError, e:
+ raise AttributeError(
+ 'Strict handshake is specified but the connection '
+ 'doesn\'t provide get_memorized_lines()')
+ self._check_first_lines(lines)
+
+ def _check_first_lines(self, lines):
+ if len(lines) < len(_FIRST_FIVE_LINES):
+ raise HandshakeException('Too few header lines: %d' % len(lines))
+ for line, regexp in zip(lines, _FIRST_FIVE_LINES):
+ if not regexp.search(line):
+ raise HandshakeException(
+ 'Unexpected header: %r doesn\'t match %r'
+ % (line, regexp.pattern))
+ sixth_and_later = ''.join(lines[5:])
+ if not _SIXTH_AND_LATER.search(sixth_and_later):
+ raise HandshakeException(
+ 'Unexpected header: %r doesn\'t match %r'
+ % (sixth_and_later, _SIXTH_AND_LATER.pattern))
+
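+# The response written by _send_handshake has this shape (header values are
+# illustrative; the WebSocket-Protocol line is sent only when the client
+# requested a subprotocol):
+#
+#     HTTP/1.1 101 Web Socket Protocol Handshake
+#     Upgrade: WebSocket
+#     Connection: Upgrade
+#     WebSocket-Origin: http://example.com
+#     WebSocket-Location: ws://example.com/echo
+#     WebSocket-Protocol: chat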
+
+# vi:sts=4 sw=4 et
diff --git a/module/lib/mod_pywebsocket/handshake/hybi.py b/module/lib/mod_pywebsocket/handshake/hybi.py
new file mode 100644
index 000000000..2883acbf8
--- /dev/null
+++ b/module/lib/mod_pywebsocket/handshake/hybi.py
@@ -0,0 +1,372 @@
+# Copyright 2012, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+"""This file provides the opening handshake processor for the WebSocket
+protocol (RFC 6455).
+
+Specification:
+http://tools.ietf.org/html/rfc6455
+"""
+
+
+# Note: request.connection.write is used in this module, even though the
+# mod_python documentation says that it should be used only in connection
+# handlers. Unfortunately, we have no other option. For example, request.write
+# is not suitable because it doesn't allow writing raw bytes directly.
+
+
+import base64
+import logging
+import os
+import re
+
+from mod_pywebsocket import common
+from mod_pywebsocket.extensions import get_extension_processor
+from mod_pywebsocket.handshake._base import check_request_line
+from mod_pywebsocket.handshake._base import format_header
+from mod_pywebsocket.handshake._base import get_mandatory_header
+from mod_pywebsocket.handshake._base import HandshakeException
+from mod_pywebsocket.handshake._base import parse_token_list
+from mod_pywebsocket.handshake._base import validate_mandatory_header
+from mod_pywebsocket.handshake._base import validate_subprotocol
+from mod_pywebsocket.handshake._base import VersionException
+from mod_pywebsocket.stream import Stream
+from mod_pywebsocket.stream import StreamOptions
+from mod_pywebsocket import util
+
+
+# Used to validate the value in the Sec-WebSocket-Key header strictly. RFC 4648
+# disallows non-zero padding, so the character right before == must be any of
+# A, Q, g and w.
+_SEC_WEBSOCKET_KEY_REGEX = re.compile('^[+/0-9A-Za-z]{21}[AQgw]==$')
+
+# Defining aliases for values used frequently.
+_VERSION_HYBI08 = common.VERSION_HYBI08
+_VERSION_HYBI08_STRING = str(_VERSION_HYBI08)
+_VERSION_LATEST = common.VERSION_HYBI_LATEST
+_VERSION_LATEST_STRING = str(_VERSION_LATEST)
+_SUPPORTED_VERSIONS = [
+ _VERSION_LATEST,
+ _VERSION_HYBI08,
+]
+
+
+def compute_accept(key):
+ """Computes value for the Sec-WebSocket-Accept header from value of the
+ Sec-WebSocket-Key header.
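+
+    For example, the sample key from RFC 6455 section 1.3,
+    'dGhlIHNhbXBsZSBub25jZQ==', maps to the accept value
+    's3pPLMBiTxaQ9kYGzzhZRbK+xOo='.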
+ """
+
+ accept_binary = util.sha1_hash(
+ key + common.WEBSOCKET_ACCEPT_UUID).digest()
+ accept = base64.b64encode(accept_binary)
+
+ return (accept, accept_binary)
+
+
+class Handshaker(object):
+ """Opening handshake processor for the WebSocket protocol (RFC 6455)."""
+
+ def __init__(self, request, dispatcher):
+ """Construct an instance.
+
+ Args:
+ request: mod_python request.
+ dispatcher: Dispatcher (dispatch.Dispatcher).
+
+ Handshaker will add attributes such as ws_resource during handshake.
+ """
+
+ self._logger = util.get_class_logger(self)
+
+ self._request = request
+ self._dispatcher = dispatcher
+
+ def _validate_connection_header(self):
+ connection = get_mandatory_header(
+ self._request, common.CONNECTION_HEADER)
+
+ try:
+ connection_tokens = parse_token_list(connection)
+ except HandshakeException, e:
+ raise HandshakeException(
+ 'Failed to parse %s: %s' % (common.CONNECTION_HEADER, e))
+
+ connection_is_valid = False
+ for token in connection_tokens:
+ if token.lower() == common.UPGRADE_CONNECTION_TYPE.lower():
+ connection_is_valid = True
+ break
+ if not connection_is_valid:
+ raise HandshakeException(
+ '%s header doesn\'t contain "%s"' %
+ (common.CONNECTION_HEADER, common.UPGRADE_CONNECTION_TYPE))
+
+ def do_handshake(self):
+ self._request.ws_close_code = None
+ self._request.ws_close_reason = None
+
+ # Parsing.
+
+ check_request_line(self._request)
+
+ validate_mandatory_header(
+ self._request,
+ common.UPGRADE_HEADER,
+ common.WEBSOCKET_UPGRADE_TYPE)
+
+ self._validate_connection_header()
+
+ self._request.ws_resource = self._request.uri
+
+ unused_host = get_mandatory_header(self._request, common.HOST_HEADER)
+
+ self._request.ws_version = self._check_version()
+
+        # This handshake must be based on the latest hybi. We are responsible
+        # for falling back to HTTP on handshake failure, as the latest hybi
+        # handshake specifies.
+ try:
+ self._get_origin()
+ self._set_protocol()
+ self._parse_extensions()
+
+ # Key validation, response generation.
+
+ key = self._get_key()
+ (accept, accept_binary) = compute_accept(key)
+ self._logger.debug(
+ '%s: %r (%s)',
+ common.SEC_WEBSOCKET_ACCEPT_HEADER,
+ accept,
+ util.hexify(accept_binary))
+
+ self._logger.debug('Protocol version is RFC 6455')
+
+ # Setup extension processors.
+
+ processors = []
+ if self._request.ws_requested_extensions is not None:
+ for extension_request in self._request.ws_requested_extensions:
+ processor = get_extension_processor(extension_request)
+ # Unknown extension requests are just ignored.
+ if processor is not None:
+ processors.append(processor)
+ self._request.ws_extension_processors = processors
+
+ # Extra handshake handler may modify/remove processors.
+ self._dispatcher.do_extra_handshake(self._request)
+
+ stream_options = StreamOptions()
+
+ self._request.ws_extensions = None
+ for processor in self._request.ws_extension_processors:
+ if processor is None:
+ # Some processors may be removed by extra handshake
+ # handler.
+ continue
+
+ extension_response = processor.get_extension_response()
+ if extension_response is None:
+ # Rejected.
+ continue
+
+ if self._request.ws_extensions is None:
+ self._request.ws_extensions = []
+ self._request.ws_extensions.append(extension_response)
+
+ processor.setup_stream_options(stream_options)
+
+ if self._request.ws_extensions is not None:
+ self._logger.debug(
+ 'Extensions accepted: %r',
+ map(common.ExtensionParameter.name,
+ self._request.ws_extensions))
+
+ self._request.ws_stream = Stream(self._request, stream_options)
+
+ if self._request.ws_requested_protocols is not None:
+ if self._request.ws_protocol is None:
+ raise HandshakeException(
+ 'do_extra_handshake must choose one subprotocol from '
+ 'ws_requested_protocols and set it to ws_protocol')
+ validate_subprotocol(self._request.ws_protocol, hixie=False)
+
+ self._logger.debug(
+ 'Subprotocol accepted: %r',
+ self._request.ws_protocol)
+ else:
+ if self._request.ws_protocol is not None:
+ raise HandshakeException(
+ 'ws_protocol must be None when the client didn\'t '
+ 'request any subprotocol')
+
+ self._send_handshake(accept)
+ except HandshakeException, e:
+ if not e.status:
+ # Fallback to 400 bad request by default.
+ e.status = common.HTTP_STATUS_BAD_REQUEST
+ raise e
+
+ def _get_origin(self):
+ if self._request.ws_version is _VERSION_HYBI08:
+ origin_header = common.SEC_WEBSOCKET_ORIGIN_HEADER
+ else:
+ origin_header = common.ORIGIN_HEADER
+ origin = self._request.headers_in.get(origin_header)
+ if origin is None:
+ self._logger.debug('Client request does not have origin header')
+ self._request.ws_origin = origin
+
+ def _check_version(self):
+ version = get_mandatory_header(self._request,
+ common.SEC_WEBSOCKET_VERSION_HEADER)
+ if version == _VERSION_HYBI08_STRING:
+ return _VERSION_HYBI08
+ if version == _VERSION_LATEST_STRING:
+ return _VERSION_LATEST
+
+ if version.find(',') >= 0:
+ raise HandshakeException(
+ 'Multiple versions (%r) are not allowed for header %s' %
+ (version, common.SEC_WEBSOCKET_VERSION_HEADER),
+ status=common.HTTP_STATUS_BAD_REQUEST)
+ raise VersionException(
+ 'Unsupported version %r for header %s' %
+ (version, common.SEC_WEBSOCKET_VERSION_HEADER),
+ supported_versions=', '.join(map(str, _SUPPORTED_VERSIONS)))
+
+ def _set_protocol(self):
+ self._request.ws_protocol = None
+
+ protocol_header = self._request.headers_in.get(
+ common.SEC_WEBSOCKET_PROTOCOL_HEADER)
+
+ if not protocol_header:
+ self._request.ws_requested_protocols = None
+ return
+
+ self._request.ws_requested_protocols = parse_token_list(
+ protocol_header)
+ self._logger.debug('Subprotocols requested: %r',
+ self._request.ws_requested_protocols)
+
+ def _parse_extensions(self):
+ extensions_header = self._request.headers_in.get(
+ common.SEC_WEBSOCKET_EXTENSIONS_HEADER)
+ if not extensions_header:
+ self._request.ws_requested_extensions = None
+ return
+
+ if self._request.ws_version is common.VERSION_HYBI08:
+            allow_quoted_string = False
+        else:
+            allow_quoted_string = True
+ try:
+ self._request.ws_requested_extensions = common.parse_extensions(
+ extensions_header, allow_quoted_string=allow_quoted_string)
+ except common.ExtensionParsingException, e:
+ raise HandshakeException(
+ 'Failed to parse Sec-WebSocket-Extensions header: %r' % e)
+
+ self._logger.debug(
+ 'Extensions requested: %r',
+ map(common.ExtensionParameter.name,
+ self._request.ws_requested_extensions))
+
+ def _validate_key(self, key):
+ if key.find(',') >= 0:
+ raise HandshakeException('Request has multiple %s header lines or '
+ 'contains illegal character \',\': %r' %
+ (common.SEC_WEBSOCKET_KEY_HEADER, key))
+
+ # Validate
+ key_is_valid = False
+ try:
+ # Validate key by quick regex match before parsing by base64
+ # module. Because base64 module skips invalid characters, we have
+ # to do this in advance to make this server strictly reject illegal
+ # keys.
+ if _SEC_WEBSOCKET_KEY_REGEX.match(key):
+ decoded_key = base64.b64decode(key)
+ if len(decoded_key) == 16:
+ key_is_valid = True
+ except TypeError, e:
+ pass
+
+ if not key_is_valid:
+ raise HandshakeException(
+ 'Illegal value for header %s: %r' %
+ (common.SEC_WEBSOCKET_KEY_HEADER, key))
+
+ return decoded_key
+
+ def _get_key(self):
+ key = get_mandatory_header(
+ self._request, common.SEC_WEBSOCKET_KEY_HEADER)
+
+ decoded_key = self._validate_key(key)
+
+ self._logger.debug(
+ '%s: %r (%s)',
+ common.SEC_WEBSOCKET_KEY_HEADER,
+ key,
+ util.hexify(decoded_key))
+
+ return key
+
+ def _send_handshake(self, accept):
+ response = []
+
+ response.append('HTTP/1.1 101 Switching Protocols\r\n')
+
+ response.append(format_header(
+ common.UPGRADE_HEADER, common.WEBSOCKET_UPGRADE_TYPE))
+ response.append(format_header(
+ common.CONNECTION_HEADER, common.UPGRADE_CONNECTION_TYPE))
+ response.append(format_header(
+ common.SEC_WEBSOCKET_ACCEPT_HEADER, accept))
+ if self._request.ws_protocol is not None:
+ response.append(format_header(
+ common.SEC_WEBSOCKET_PROTOCOL_HEADER,
+ self._request.ws_protocol))
+ if (self._request.ws_extensions is not None and
+ len(self._request.ws_extensions) != 0):
+ response.append(format_header(
+ common.SEC_WEBSOCKET_EXTENSIONS_HEADER,
+ common.format_extensions(self._request.ws_extensions)))
+ response.append('\r\n')
+
+ raw_response = ''.join(response)
+ self._request.connection.write(raw_response)
+ self._logger.debug('Sent server\'s opening handshake: %r',
+ raw_response)
+
+
+# vi:sts=4 sw=4 et
diff --git a/module/lib/mod_pywebsocket/handshake/hybi00.py b/module/lib/mod_pywebsocket/handshake/hybi00.py
new file mode 100644
index 000000000..cc6f8dc43
--- /dev/null
+++ b/module/lib/mod_pywebsocket/handshake/hybi00.py
@@ -0,0 +1,242 @@
+# Copyright 2011, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+"""This file provides the opening handshake processor for the WebSocket
+protocol version HyBi 00.
+
+Specification:
+http://tools.ietf.org/html/draft-ietf-hybi-thewebsocketprotocol-00
+"""
+
+
+# Note: request.connection.write/read are used in this module, even though
+# mod_python document says that they should be used only in connection
+# handlers. Unfortunately, we have no other options. For example,
+# request.write/read are not suitable because they don't allow direct raw bytes
+# writing/reading.
+
+
+import logging
+import re
+import struct
+
+from mod_pywebsocket import common
+from mod_pywebsocket.stream import StreamHixie75
+from mod_pywebsocket import util
+from mod_pywebsocket.handshake._base import HandshakeException
+from mod_pywebsocket.handshake._base import build_location
+from mod_pywebsocket.handshake._base import check_header_lines
+from mod_pywebsocket.handshake._base import format_header
+from mod_pywebsocket.handshake._base import get_mandatory_header
+from mod_pywebsocket.handshake._base import validate_subprotocol
+
+
+_MANDATORY_HEADERS = [
+ # key, expected value or None
+ [common.UPGRADE_HEADER, common.WEBSOCKET_UPGRADE_TYPE_HIXIE75],
+ [common.CONNECTION_HEADER, common.UPGRADE_CONNECTION_TYPE],
+]
+
+
+class Handshaker(object):
+ """Opening handshake processor for the WebSocket protocol version HyBi 00.
+ """
+
+ def __init__(self, request, dispatcher):
+ """Construct an instance.
+
+ Args:
+ request: mod_python request.
+ dispatcher: Dispatcher (dispatch.Dispatcher).
+
+ Handshaker will add attributes such as ws_resource in performing
+ handshake.
+ """
+
+ self._logger = util.get_class_logger(self)
+
+ self._request = request
+ self._dispatcher = dispatcher
+
+ def do_handshake(self):
+ """Perform WebSocket Handshake.
+
+ On _request, we set
+ ws_resource, ws_protocol, ws_location, ws_origin, ws_challenge,
+ ws_challenge_md5: WebSocket handshake information.
+ ws_stream: Frame generation/parsing class.
+ ws_version: Protocol version.
+
+ Raises:
+ HandshakeException: when any error happened in parsing the opening
+ handshake request.
+ """
+
+ # 5.1 Reading the client's opening handshake.
+ # dispatcher sets it in self._request.
+ check_header_lines(self._request, _MANDATORY_HEADERS)
+ self._set_resource()
+ self._set_subprotocol()
+ self._set_location()
+ self._set_origin()
+ self._set_challenge_response()
+ self._set_protocol_version()
+
+ self._dispatcher.do_extra_handshake(self._request)
+
+ self._send_handshake()
+
+ def _set_resource(self):
+ self._request.ws_resource = self._request.uri
+
+ def _set_subprotocol(self):
+ # |Sec-WebSocket-Protocol|
+ subprotocol = self._request.headers_in.get(
+ common.SEC_WEBSOCKET_PROTOCOL_HEADER)
+ if subprotocol is not None:
+ validate_subprotocol(subprotocol, hixie=True)
+ self._request.ws_protocol = subprotocol
+
+ def _set_location(self):
+ # |Host|
+ host = self._request.headers_in.get(common.HOST_HEADER)
+ if host is not None:
+ self._request.ws_location = build_location(self._request)
+ # TODO(ukai): check host is this host.
+
+ def _set_origin(self):
+ # |Origin|
+ origin = self._request.headers_in.get(common.ORIGIN_HEADER)
+ if origin is not None:
+ self._request.ws_origin = origin
+
+ def _set_protocol_version(self):
+ # |Sec-WebSocket-Draft|
+ draft = self._request.headers_in.get(common.SEC_WEBSOCKET_DRAFT_HEADER)
+ if draft is not None and draft != '0':
+ raise HandshakeException('Illegal value for %s: %s' %
+ (common.SEC_WEBSOCKET_DRAFT_HEADER,
+ draft))
+
+ self._logger.debug('Protocol version is HyBi 00')
+ self._request.ws_version = common.VERSION_HYBI00
+ self._request.ws_stream = StreamHixie75(self._request, True)
+
+ def _set_challenge_response(self):
+ # 5.2 4-8.
+ self._request.ws_challenge = self._get_challenge()
+        # 5.2 9. let /response/ be the MD5 fingerprint of /challenge/
+ self._request.ws_challenge_md5 = util.md5_hash(
+ self._request.ws_challenge).digest()
+ self._logger.debug(
+ 'Challenge: %r (%s)',
+ self._request.ws_challenge,
+ util.hexify(self._request.ws_challenge))
+ self._logger.debug(
+ 'Challenge response: %r (%s)',
+ self._request.ws_challenge_md5,
+ util.hexify(self._request.ws_challenge_md5))
+
+ def _get_key_value(self, key_field):
+ key_value = get_mandatory_header(self._request, key_field)
+
+ self._logger.debug('%s: %r', key_field, key_value)
+
+ # 5.2 4. let /key-number_n/ be the digits (characters in the range
+ # U+0030 DIGIT ZERO (0) to U+0039 DIGIT NINE (9)) in /key_n/,
+ # interpreted as a base ten integer, ignoring all other characters
+ # in /key_n/.
+ try:
+ key_number = int(re.sub("\\D", "", key_value))
+ except:
+ raise HandshakeException('%s field contains no digit' % key_field)
+ # 5.2 5. let /spaces_n/ be the number of U+0020 SPACE characters
+ # in /key_n/.
+ spaces = re.subn(" ", "", key_value)[1]
+ if spaces == 0:
+ raise HandshakeException('%s field contains no space' % key_field)
+
+ self._logger.debug(
+ '%s: Key-number is %d and number of spaces is %d',
+ key_field, key_number, spaces)
+
+ # 5.2 6. if /key-number_n/ is not an integral multiple of /spaces_n/
+ # then abort the WebSocket connection.
+ if key_number % spaces != 0:
+ raise HandshakeException(
+ '%s: Key-number (%d) is not an integral multiple of spaces '
+ '(%d)' % (key_field, key_number, spaces))
+ # 5.2 7. let /part_n/ be /key-number_n/ divided by /spaces_n/.
+ part = key_number / spaces
+ self._logger.debug('%s: Part is %d', key_field, part)
+ return part
+
+ def _get_challenge(self):
+ # 5.2 4-7.
+ key1 = self._get_key_value(common.SEC_WEBSOCKET_KEY1_HEADER)
+ key2 = self._get_key_value(common.SEC_WEBSOCKET_KEY2_HEADER)
+        # 5.2 8. let /challenge/ be the concatenation of /part_1/, /part_2/,
+        # and /key_3/ (the eight bytes read from the connection below).
+ challenge = ''
+ challenge += struct.pack('!I', key1) # network byteorder int
+ challenge += struct.pack('!I', key2) # network byteorder int
+ challenge += self._request.connection.read(8)
+ return challenge
+
+ def _send_handshake(self):
+ response = []
+
+ # 5.2 10. send the following line.
+ response.append('HTTP/1.1 101 WebSocket Protocol Handshake\r\n')
+
+ # 5.2 11. send the following fields to the client.
+ response.append(format_header(
+ common.UPGRADE_HEADER, common.WEBSOCKET_UPGRADE_TYPE_HIXIE75))
+ response.append(format_header(
+ common.CONNECTION_HEADER, common.UPGRADE_CONNECTION_TYPE))
+ response.append(format_header(
+ common.SEC_WEBSOCKET_LOCATION_HEADER, self._request.ws_location))
+ response.append(format_header(
+ common.SEC_WEBSOCKET_ORIGIN_HEADER, self._request.ws_origin))
+ if self._request.ws_protocol:
+ response.append(format_header(
+ common.SEC_WEBSOCKET_PROTOCOL_HEADER,
+ self._request.ws_protocol))
+ # 5.2 12. send two bytes 0x0D 0x0A.
+ response.append('\r\n')
+ # 5.2 13. send /response/
+ response.append(self._request.ws_challenge_md5)
+
+ raw_response = ''.join(response)
+ self._request.connection.write(raw_response)
+ self._logger.debug('Sent server\'s opening handshake: %r',
+ raw_response)
+
+
+# vi:sts=4 sw=4 et
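
Editor's note: to make the arithmetic in _get_key_value and _get_challenge above
concrete, here is a self-contained sketch of the HyBi 00 challenge response, assuming
key1 and key2 are the raw Sec-WebSocket-Key1/Key2 header values and key3 is the eight
bytes read from the connection:

    import hashlib
    import re
    import struct

    def hybi00_response(key1, key2, key3):
        parts = []
        for key in (key1, key2):
            number = int(re.sub(r'\D', '', key))  # digits of the key field
            spaces = key.count(' ')               # at least one space is required
            parts.append(number // spaces)        # must divide evenly
        # challenge = part_1 . part_2 . key_3; response = MD5(challenge)
        challenge = struct.pack('!II', parts[0], parts[1]) + key3
        return hashlib.md5(challenge).digest()
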
diff --git a/module/lib/mod_pywebsocket/headerparserhandler.py b/module/lib/mod_pywebsocket/headerparserhandler.py
new file mode 100644
index 000000000..b68c240e1
--- /dev/null
+++ b/module/lib/mod_pywebsocket/headerparserhandler.py
@@ -0,0 +1,243 @@
+# Copyright 2011, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+"""PythonHeaderParserHandler for mod_pywebsocket.
+
+Apache HTTP Server and mod_python must be configured such that this
+function is called to handle WebSocket requests.
+"""
+
+
+import logging
+
+from mod_python import apache
+
+from mod_pywebsocket import common
+from mod_pywebsocket import dispatch
+from mod_pywebsocket import handshake
+from mod_pywebsocket import util
+
+
+# PythonOption to specify the handler root directory.
+_PYOPT_HANDLER_ROOT = 'mod_pywebsocket.handler_root'
+
+# PythonOption to specify the handler scan directory.
+# This must be a directory under the root directory.
+# The default is the root directory.
+_PYOPT_HANDLER_SCAN = 'mod_pywebsocket.handler_scan'
+
+# PythonOption to allow handlers whose canonical path is
+# not under the root directory. It's disallowed by default.
+# Set this option with value of 'yes' to allow.
+_PYOPT_ALLOW_HANDLERS_OUTSIDE_ROOT = (
+ 'mod_pywebsocket.allow_handlers_outside_root_dir')
+# Map from values to their meanings. 'Yes' and 'No' are allowed just for
+# compatibility.
+_PYOPT_ALLOW_HANDLERS_OUTSIDE_ROOT_DEFINITION = {
+ 'off': False, 'no': False, 'on': True, 'yes': True}
+
+# PythonOption to specify to allow draft75 handshake.
+# The default is None (Off)
+_PYOPT_ALLOW_DRAFT75 = 'mod_pywebsocket.allow_draft75'
+# Map from values to their meanings.
+_PYOPT_ALLOW_DRAFT75_DEFINITION = {'off': False, 'on': True}
+
+
+class ApacheLogHandler(logging.Handler):
+ """Wrapper logging.Handler to emit log message to apache's error.log."""
+
+ _LEVELS = {
+ logging.DEBUG: apache.APLOG_DEBUG,
+ logging.INFO: apache.APLOG_INFO,
+ logging.WARNING: apache.APLOG_WARNING,
+ logging.ERROR: apache.APLOG_ERR,
+ logging.CRITICAL: apache.APLOG_CRIT,
+ }
+
+ def __init__(self, request=None):
+ logging.Handler.__init__(self)
+ self._log_error = apache.log_error
+ if request is not None:
+ self._log_error = request.log_error
+
+ # Time and level will be printed by Apache.
+ self._formatter = logging.Formatter('%(name)s: %(message)s')
+
+ def emit(self, record):
+ apache_level = apache.APLOG_DEBUG
+ if record.levelno in ApacheLogHandler._LEVELS:
+ apache_level = ApacheLogHandler._LEVELS[record.levelno]
+
+ msg = self._formatter.format(record)
+
+ # "server" parameter must be passed to have "level" parameter work.
+ # If only "level" parameter is passed, nothing shows up on Apache's
+ # log. However, at this point, we cannot get the server object of the
+ # virtual host which will process WebSocket requests. The only server
+        # object we can get here is apache.main_server. But wherever (server
+        # configuration context or virtual host context) we put the
+        # PythonHeaderParserHandler directive, apache.main_server just points to
+        # the main server instance (not to any of the virtual server instances). Then,
+ # Apache follows LogLevel directive in the server configuration context
+ # to filter logs. So, we need to specify LogLevel in the server
+ # configuration context. Even if we specify "LogLevel debug" in the
+ # virtual host context which actually handles WebSocket connections,
+ # DEBUG level logs never show up unless "LogLevel debug" is specified
+ # in the server configuration context.
+ #
+ # TODO(tyoshino): Provide logging methods on request object. When
+ # request is mp_request object (when used together with Apache), the
+ # methods call request.log_error indirectly. When request is
+ # _StandaloneRequest, the methods call Python's logging facility which
+ # we create in standalone.py.
+ self._log_error(msg, apache_level, apache.main_server)
+
+
+def _configure_logging():
+ logger = logging.getLogger()
+ # Logs are filtered by Apache based on LogLevel directive in Apache
+ # configuration file. We must just pass logs for all levels to
+ # ApacheLogHandler.
+ logger.setLevel(logging.DEBUG)
+ logger.addHandler(ApacheLogHandler())
+
+
+_configure_logging()
+
+_LOGGER = logging.getLogger(__name__)
+
+
+def _parse_option(name, value, definition):
+ if value is None:
+ return False
+
+ meaning = definition.get(value.lower())
+ if meaning is None:
+ raise Exception('Invalid value for PythonOption %s: %r' %
+ (name, value))
+ return meaning
+
+
+def _create_dispatcher():
+ _LOGGER.info('Initializing Dispatcher')
+
+ options = apache.main_server.get_options()
+
+ handler_root = options.get(_PYOPT_HANDLER_ROOT, None)
+ if not handler_root:
+ raise Exception('PythonOption %s is not defined' % _PYOPT_HANDLER_ROOT,
+ apache.APLOG_ERR)
+
+ handler_scan = options.get(_PYOPT_HANDLER_SCAN, handler_root)
+
+ allow_handlers_outside_root = _parse_option(
+ _PYOPT_ALLOW_HANDLERS_OUTSIDE_ROOT,
+ options.get(_PYOPT_ALLOW_HANDLERS_OUTSIDE_ROOT),
+ _PYOPT_ALLOW_HANDLERS_OUTSIDE_ROOT_DEFINITION)
+
+ dispatcher = dispatch.Dispatcher(
+ handler_root, handler_scan, allow_handlers_outside_root)
+
+ for warning in dispatcher.source_warnings():
+ apache.log_error('mod_pywebsocket: %s' % warning, apache.APLOG_WARNING)
+
+ return dispatcher
+
+
+# Initialize
+_dispatcher = _create_dispatcher()
+
+
+def headerparserhandler(request):
+ """Handle request.
+
+ Args:
+ request: mod_python request.
+
+ This function is named headerparserhandler because it is the default
+ name for a PythonHeaderParserHandler.
+ """
+
+ handshake_is_done = False
+ try:
+ # Fallback to default http handler for request paths for which
+ # we don't have request handlers.
+ if not _dispatcher.get_handler_suite(request.uri):
+ request.log_error('No handler for resource: %r' % request.uri,
+ apache.APLOG_INFO)
+ request.log_error('Fallback to Apache', apache.APLOG_INFO)
+ return apache.DECLINED
+ except dispatch.DispatchException, e:
+ request.log_error('mod_pywebsocket: %s' % e, apache.APLOG_INFO)
+ if not handshake_is_done:
+ return e.status
+
+ try:
+ allow_draft75 = _parse_option(
+ _PYOPT_ALLOW_DRAFT75,
+ apache.main_server.get_options().get(_PYOPT_ALLOW_DRAFT75),
+ _PYOPT_ALLOW_DRAFT75_DEFINITION)
+
+ try:
+ handshake.do_handshake(
+ request, _dispatcher, allowDraft75=allow_draft75)
+ except handshake.VersionException, e:
+ request.log_error('mod_pywebsocket: %s' % e, apache.APLOG_INFO)
+ request.err_headers_out.add(common.SEC_WEBSOCKET_VERSION_HEADER,
+ e.supported_versions)
+ return apache.HTTP_BAD_REQUEST
+ except handshake.HandshakeException, e:
+ # Handshake for ws/wss failed.
+ # Send http response with error status.
+ request.log_error('mod_pywebsocket: %s' % e, apache.APLOG_INFO)
+ return e.status
+
+ handshake_is_done = True
+ request._dispatcher = _dispatcher
+ _dispatcher.transfer_data(request)
+ except handshake.AbortedByUserException, e:
+ request.log_error('mod_pywebsocket: %s' % e, apache.APLOG_INFO)
+ except Exception, e:
+ # DispatchException can also be thrown if something is wrong in
+ # pywebsocket code. It's caught here, then.
+
+ request.log_error('mod_pywebsocket: %s\n%s' %
+ (e, util.get_stack_trace()),
+ apache.APLOG_ERR)
+ # Unknown exceptions before handshake mean Apache must handle its
+ # request with another handler.
+ if not handshake_is_done:
+ return apache.DECLINED
+ # Set assbackwards to suppress response header generation by Apache.
+ request.assbackwards = 1
+ return apache.DONE # Return DONE such that no other handlers are invoked.
+
+
+# vi:sts=4 sw=4 et
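
Editor's note: the dispatcher created above resolves request paths to handler modules.
The handler interface mod_pywebsocket expects is two module-level functions; a minimal
echo handler as a sketch (the file name and its location under the directory given by
the mod_pywebsocket.handler_root option are placeholder assumptions):

    # echo_wsh.py
    from mod_pywebsocket import msgutil

    def web_socket_do_extra_handshake(request):
        # Accept the handshake as-is; raise an exception here to reject it.
        pass

    def web_socket_transfer_data(request):
        # Echo every received message back to the client.
        while True:
            message = msgutil.receive_message(request)
            msgutil.send_message(request, message)
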
diff --git a/module/lib/mod_pywebsocket/http_header_util.py b/module/lib/mod_pywebsocket/http_header_util.py
new file mode 100644
index 000000000..b77465393
--- /dev/null
+++ b/module/lib/mod_pywebsocket/http_header_util.py
@@ -0,0 +1,263 @@
+# Copyright 2011, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+"""Utilities for parsing and formatting headers that follow the grammar defined
+in HTTP RFC http://www.ietf.org/rfc/rfc2616.txt.
+"""
+
+
+import urlparse
+
+
+_SEPARATORS = '()<>@,;:\\"/[]?={} \t'
+
+
+def _is_char(c):
+ """Returns true iff c is in CHAR as specified in HTTP RFC."""
+
+ return ord(c) <= 127
+
+
+def _is_ctl(c):
+ """Returns true iff c is in CTL as specified in HTTP RFC."""
+
+ return ord(c) <= 31 or ord(c) == 127
+
+
+class ParsingState(object):
+
+ def __init__(self, data):
+ self.data = data
+ self.head = 0
+
+
+def peek(state, pos=0):
+ """Peeks the character at pos from the head of data."""
+
+ if state.head + pos >= len(state.data):
+ return None
+
+ return state.data[state.head + pos]
+
+
+def consume(state, amount=1):
+ """Consumes specified amount of bytes from the head and returns the
+    consumed bytes. If there are not enough bytes to consume, returns None.
+ """
+
+ if state.head + amount > len(state.data):
+ return None
+
+ result = state.data[state.head:state.head + amount]
+ state.head = state.head + amount
+ return result
+
+
+def consume_string(state, expected):
+ """Given a parsing state and a expected string, consumes the string from
+ the head. Returns True if consumed successfully. Otherwise, returns
+ False.
+ """
+
+ pos = 0
+
+ for c in expected:
+ if c != peek(state, pos):
+ return False
+ pos += 1
+
+ consume(state, pos)
+ return True
+
+
+def consume_lws(state):
+ """Consumes a LWS from the head. Returns True if any LWS is consumed.
+ Otherwise, returns False.
+
+ LWS = [CRLF] 1*( SP | HT )
+ """
+
+ original_head = state.head
+
+ consume_string(state, '\r\n')
+
+ pos = 0
+
+ while True:
+ c = peek(state, pos)
+ if c == ' ' or c == '\t':
+ pos += 1
+ else:
+ if pos == 0:
+ state.head = original_head
+ return False
+ else:
+ consume(state, pos)
+ return True
+
+
+def consume_lwses(state):
+ """Consumes *LWS from the head."""
+
+ while consume_lws(state):
+ pass
+
+
+def consume_token(state):
+ """Consumes a token from the head. Returns the token or None if no token
+ was found.
+ """
+
+ pos = 0
+
+ while True:
+ c = peek(state, pos)
+ if c is None or c in _SEPARATORS or _is_ctl(c) or not _is_char(c):
+ if pos == 0:
+ return None
+
+ return consume(state, pos)
+ else:
+ pos += 1
+
+
+def consume_token_or_quoted_string(state):
+ """Consumes a token or a quoted-string, and returns the token or unquoted
+ string. If no token or quoted-string was found, returns None.
+ """
+
+ original_head = state.head
+
+ if not consume_string(state, '"'):
+ return consume_token(state)
+
+ result = []
+
+ expect_quoted_pair = False
+
+ while True:
+ if not expect_quoted_pair and consume_lws(state):
+ result.append(' ')
+ continue
+
+ c = consume(state)
+ if c is None:
+ # quoted-string is not enclosed with double quotation
+ state.head = original_head
+ return None
+ elif expect_quoted_pair:
+ expect_quoted_pair = False
+ if _is_char(c):
+ result.append(c)
+ else:
+ # Non CHAR character found in quoted-pair
+ state.head = original_head
+ return None
+ elif c == '\\':
+ expect_quoted_pair = True
+ elif c == '"':
+ return ''.join(result)
+ elif _is_ctl(c):
+ # Invalid character %r found in qdtext
+ state.head = original_head
+ return None
+ else:
+ result.append(c)
+
+
+def quote_if_necessary(s):
+ """Quotes arbitrary string into quoted-string."""
+
+ quote = False
+ if s == '':
+ return '""'
+
+ result = []
+ for c in s:
+ if c == '"' or c in _SEPARATORS or _is_ctl(c) or not _is_char(c):
+ quote = True
+
+ if c == '"' or _is_ctl(c):
+ result.append('\\' + c)
+ else:
+ result.append(c)
+
+ if quote:
+ return '"' + ''.join(result) + '"'
+ else:
+ return ''.join(result)
+
+
+def parse_uri(uri):
+ """Parse absolute URI then return host, port and resource."""
+
+ parsed = urlparse.urlsplit(uri)
+ if parsed.scheme != 'wss' and parsed.scheme != 'ws':
+ # |uri| must be a relative URI.
+ # TODO(toyoshim): Should validate |uri|.
+ return None, None, uri
+
+ if parsed.hostname is None:
+ return None, None, None
+
+ port = None
+ try:
+ port = parsed.port
+ except ValueError, e:
+        # The port property causes ValueError on an invalid null port description
+        # like 'ws://host:/path'.
+ return None, None, None
+
+ if port is None:
+ if parsed.scheme == 'ws':
+ port = 80
+ else:
+ port = 443
+
+ path = parsed.path
+ if not path:
+ path += '/'
+ if parsed.query:
+ path += '?' + parsed.query
+ if parsed.fragment:
+ path += '#' + parsed.fragment
+
+ return parsed.hostname, port, path
+
+
+try:
+ urlparse.uses_netloc.index('ws')
+except ValueError, e:
+ # urlparse in Python2.5.1 doesn't have 'ws' and 'wss' entries.
+ urlparse.uses_netloc.append('ws')
+ urlparse.uses_netloc.append('wss')
+
+
+# vi:sts=4 sw=4 et
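
Editor's note: a short usage sketch of the helpers above; the URI and the header value
are made-up examples, and the results shown in comments follow from the parsing code:

    from mod_pywebsocket import http_header_util

    # Absolute ws:// URI -> (host, port, resource); port 80 is filled in
    # because the URI carries no explicit port.
    host, port, path = http_header_util.parse_uri('ws://example.com/chat?room=1')
    # ('example.com', 80, '/chat?room=1')

    # Token / quoted-string parsing over a raw header value.
    state = http_header_util.ParsingState('foo; x="a b"')
    http_header_util.consume_token(state)                   # 'foo'
    http_header_util.consume_string(state, ';')
    http_header_util.consume_lwses(state)
    http_header_util.consume_token(state)                   # 'x'
    http_header_util.consume_string(state, '=')
    http_header_util.consume_token_or_quoted_string(state)  # 'a b'
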
diff --git a/module/lib/mod_pywebsocket/memorizingfile.py b/module/lib/mod_pywebsocket/memorizingfile.py
new file mode 100644
index 000000000..4d4cd9585
--- /dev/null
+++ b/module/lib/mod_pywebsocket/memorizingfile.py
@@ -0,0 +1,99 @@
+#!/usr/bin/env python
+#
+# Copyright 2011, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+"""Memorizing file.
+
+A memorizing file wraps a file and memorizes lines read by readline.
+"""
+
+
+import sys
+
+
+class MemorizingFile(object):
+ """MemorizingFile wraps a file and memorizes lines read by readline.
+
+ Note that data read by other methods are not memorized. This behavior
+ is good enough for memorizing lines SimpleHTTPServer reads before
+ the control reaches WebSocketRequestHandler.
+ """
+
+ def __init__(self, file_, max_memorized_lines=sys.maxint):
+ """Construct an instance.
+
+ Args:
+ file_: the file object to wrap.
+ max_memorized_lines: the maximum number of lines to memorize.
+ Only the first max_memorized_lines are memorized.
+ Default: sys.maxint.
+ """
+
+ self._file = file_
+ self._memorized_lines = []
+ self._max_memorized_lines = max_memorized_lines
+ self._buffered = False
+ self._buffered_line = None
+
+ def __getattribute__(self, name):
+ if name in ('_file', '_memorized_lines', '_max_memorized_lines',
+ '_buffered', '_buffered_line', 'readline',
+ 'get_memorized_lines'):
+ return object.__getattribute__(self, name)
+ return self._file.__getattribute__(name)
+
+ def readline(self, size=-1):
+ """Override file.readline and memorize the line read.
+
+ Note that even if size is specified and smaller than actual size,
+        the whole line will be read out from the underlying file object by
+ subsequent readline calls.
+ """
+
+ if self._buffered:
+ line = self._buffered_line
+ self._buffered = False
+ else:
+ line = self._file.readline()
+ if line and len(self._memorized_lines) < self._max_memorized_lines:
+ self._memorized_lines.append(line)
+ if size >= 0 and size < len(line):
+ self._buffered = True
+ self._buffered_line = line[size:]
+ return line[:size]
+ return line
+
+ def get_memorized_lines(self):
+ """Get lines memorized so far."""
+ return self._memorized_lines
+
+
+# vi:sts=4 sw=4 et
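
Editor's note: a small usage sketch of MemorizingFile wrapping an in-memory file;
StringIO stands in here for the socket rfile that the standalone server (later in this
commit) actually wraps:

    import StringIO

    from mod_pywebsocket import memorizingfile

    raw = StringIO.StringIO('GET /echo HTTP/1.1\r\nHost: example.com\r\n\r\n')
    wrapped = memorizingfile.MemorizingFile(raw, max_memorized_lines=1024)

    wrapped.readline()               # 'GET /echo HTTP/1.1\r\n'
    wrapped.readline()               # 'Host: example.com\r\n'
    wrapped.get_memorized_lines()    # the two request lines read so far
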
diff --git a/module/lib/mod_pywebsocket/msgutil.py b/module/lib/mod_pywebsocket/msgutil.py
new file mode 100644
index 000000000..21ffdacf6
--- /dev/null
+++ b/module/lib/mod_pywebsocket/msgutil.py
@@ -0,0 +1,219 @@
+# Copyright 2011, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+"""Message related utilities.
+
+Note: request.connection.write/read are used in this module, even though
+mod_python document says that they should be used only in connection
+handlers. Unfortunately, we have no other options. For example,
+request.write/read are not suitable because they don't allow direct raw
+bytes writing/reading.
+"""
+
+
+import Queue
+import threading
+
+
+# Export Exception symbols from msgutil for backward compatibility
+from mod_pywebsocket._stream_base import ConnectionTerminatedException
+from mod_pywebsocket._stream_base import InvalidFrameException
+from mod_pywebsocket._stream_base import BadOperationException
+from mod_pywebsocket._stream_base import UnsupportedFrameException
+
+
+# An API for handler to send/receive WebSocket messages.
+def close_connection(request):
+ """Close connection.
+
+ Args:
+ request: mod_python request.
+ """
+ request.ws_stream.close_connection()
+
+
+def send_message(request, message, end=True, binary=False):
+ """Send message.
+
+ Args:
+ request: mod_python request.
+ message: unicode text or str binary to send.
+ end: False to send message as a fragment. All messages until the
+ first call with end=True (inclusive) will be delivered to the
+ client in separate frames but as one WebSocket message.
+ binary: send message as binary frame.
+ Raises:
+        BadOperationException: when the server has already terminated.
+ """
+ request.ws_stream.send_message(message, end, binary)
+
+
+def receive_message(request):
+ """Receive a WebSocket frame and return its payload as a text in
+ unicode or a binary in str.
+
+ Args:
+ request: mod_python request.
+ Raises:
+        InvalidFrameException: when the client sends an invalid frame.
+        UnsupportedFrameException: when the client sends an unsupported frame,
+                                   e.g. a reserved bit is set but no extension
+                                   can recognize it.
+        InvalidUTF8Exception: when the client sends a text frame containing an
+                              invalid UTF-8 string.
+        ConnectionTerminatedException: when the connection is closed
+                                       unexpectedly.
+        BadOperationException: when the client has already terminated.
+ """
+ return request.ws_stream.receive_message()
+
+
+def send_ping(request, body=''):
+ request.ws_stream.send_ping(body)
+
+
+class MessageReceiver(threading.Thread):
+ """This class receives messages from the client.
+
+ This class provides three ways to receive messages: blocking,
+ non-blocking, and via callback. Callback has the highest precedence.
+
+ Note: This class should not be used with the standalone server for wss
+ because pyOpenSSL used by the server raises a fatal error if the socket
+ is accessed from multiple threads.
+ """
+
+ def __init__(self, request, onmessage=None):
+ """Construct an instance.
+
+ Args:
+ request: mod_python request.
+ onmessage: a function to be called when a message is received.
+ May be None. If not None, the function is called on
+ another thread. In that case, MessageReceiver.receive
+ and MessageReceiver.receive_nowait are useless
+ because they will never return any messages.
+ """
+
+ threading.Thread.__init__(self)
+ self._request = request
+ self._queue = Queue.Queue()
+ self._onmessage = onmessage
+ self._stop_requested = False
+ self.setDaemon(True)
+ self.start()
+
+ def run(self):
+ try:
+ while not self._stop_requested:
+ message = receive_message(self._request)
+ if self._onmessage:
+ self._onmessage(message)
+ else:
+ self._queue.put(message)
+ finally:
+ close_connection(self._request)
+
+ def receive(self):
+ """ Receive a message from the channel, blocking.
+
+ Returns:
+ message as a unicode string.
+ """
+ return self._queue.get()
+
+ def receive_nowait(self):
+ """ Receive a message from the channel, non-blocking.
+
+ Returns:
+ message as a unicode string if available. None otherwise.
+ """
+ try:
+ message = self._queue.get_nowait()
+ except Queue.Empty:
+ message = None
+ return message
+
+ def stop(self):
+ """Request to stop this instance.
+
+ The instance will be stopped after receiving the next message.
+ This method may not be very useful, but there is no clean way
+ in Python to forcefully stop a running thread.
+ """
+ self._stop_requested = True
+
+
+class MessageSender(threading.Thread):
+ """This class sends messages to the client.
+
+ This class provides both synchronous and asynchronous ways to send
+ messages.
+
+ Note: This class should not be used with the standalone server for wss
+ because pyOpenSSL used by the server raises a fatal error if the socket
+ is accessed from multiple threads.
+ """
+
+ def __init__(self, request):
+ """Construct an instance.
+
+ Args:
+ request: mod_python request.
+ """
+ threading.Thread.__init__(self)
+ self._request = request
+ self._queue = Queue.Queue()
+ self.setDaemon(True)
+ self.start()
+
+ def run(self):
+ while True:
+ message, condition = self._queue.get()
+ condition.acquire()
+ send_message(self._request, message)
+ condition.notify()
+ condition.release()
+
+ def send(self, message):
+ """Send a message, blocking."""
+
+ condition = threading.Condition()
+ condition.acquire()
+ self._queue.put((message, condition))
+ condition.wait()
+
+ def send_nowait(self, message):
+ """Send a message, non-blocking."""
+
+ self._queue.put((message, threading.Condition()))
+
+
+# vi:sts=4 sw=4 et
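
Editor's note: one way to combine the helpers above inside a handler is to keep the
handler thread blocked in receive_message while a MessageSender thread drains queued
writes. A sketch using the handler interface assumed earlier (text messages assumed;
not suitable for wss with pyOpenSSL, per the class docstrings above):

    from mod_pywebsocket import msgutil

    def web_socket_do_extra_handshake(request):
        pass

    def web_socket_transfer_data(request):
        # Writes are queued to a MessageSender thread so this handler
        # thread can stay blocked in receive_message().
        sender = msgutil.MessageSender(request)
        while True:
            message = msgutil.receive_message(request)   # blocking read
            sender.send_nowait(u'echo: ' + message)       # queued write
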
diff --git a/module/lib/mod_pywebsocket/standalone.py b/module/lib/mod_pywebsocket/standalone.py
new file mode 100755
index 000000000..850aa5cd4
--- /dev/null
+++ b/module/lib/mod_pywebsocket/standalone.py
@@ -0,0 +1,922 @@
+#!/usr/bin/env python
+#
+# Copyright 2012, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+"""Standalone WebSocket server.
+
+BASIC USAGE
+
+Use this server to run mod_pywebsocket without Apache HTTP Server.
+
+Usage:
+ python standalone.py [-p <ws_port>] [-w <websock_handlers>]
+ [-s <scan_dir>]
+ [-d <document_root>]
+ [-m <websock_handlers_map_file>]
+ ... for other options, see _main below ...
+
+<ws_port> is the port number to use for ws:// connection.
+
+<document_root> is the path to the root directory of HTML files.
+
+<websock_handlers> is the path to the root directory of WebSocket handlers.
+See __init__.py for details of <websock_handlers> and how to write WebSocket
+handlers. If this path is relative, <document_root> is used as the base.
+
+<scan_dir> is a path under the root directory. If specified, only the
+handlers under scan_dir are scanned. This is useful in saving scan time.
+
+
+SUPPORTING TLS
+
+To support TLS, run standalone.py with -t, -k, and -c options.
+
+
+SUPPORTING CLIENT AUTHENTICATION
+
+To support client authentication with TLS, run standalone.py with -t, -k, -c,
+and --ca-certificate options.
+
+E.g., $./standalone.py -d ../example -p 10443 -t -c ../test/cert/cert.pem -k
+../test/cert/key.pem --ca-certificate=../test/cert/cacert.pem
+
+
+CONFIGURATION FILE
+
+You can also write a configuration file and use it by specifying the path to
+the configuration file with the --config option. Please write a configuration
+file following the documentation of the Python ConfigParser library, putting
+the entries under a [pywebsocket] section. The name of each entry must be the
+long-form argument name. E.g. to set the log level to debug, add the
+following line:
+
+log_level=debug
+
+For options which don't take a value, please add some fake value. E.g. for
+--tls option, add the following line:
+
+tls=True
+
+Note that tls will be enabled even if you write tls=False, because the value
+part is ignored.
+
+When both a command line argument and a configuration file entry are set for
+the same configuration item, the command line value will override the one in the
+configuration file.
+
+
+THREADING
+
+This server is derived from SocketServer.ThreadingMixIn. Hence a thread is
+used for each request.
+
+
+SECURITY WARNING
+
+This uses CGIHTTPServer and CGIHTTPServer is not secure.
+It may execute arbitrary Python code or external programs. It should not be
+used outside a firewall.
+"""
+
+import BaseHTTPServer
+import CGIHTTPServer
+import SimpleHTTPServer
+import SocketServer
+import ConfigParser
+import httplib
+import logging
+import logging.handlers
+import optparse
+import os
+import re
+import select
+import socket
+import sys
+import threading
+import time
+
+_HAS_SSL = False
+_HAS_OPEN_SSL = False
+try:
+ import ssl
+ _HAS_SSL = True
+except ImportError:
+ try:
+ import OpenSSL.SSL
+ _HAS_OPEN_SSL = True
+ except ImportError:
+ pass
+
+from mod_pywebsocket import common
+from mod_pywebsocket import dispatch
+from mod_pywebsocket import handshake
+from mod_pywebsocket import http_header_util
+from mod_pywebsocket import memorizingfile
+from mod_pywebsocket import util
+
+
+_DEFAULT_LOG_MAX_BYTES = 1024 * 256
+_DEFAULT_LOG_BACKUP_COUNT = 5
+
+_DEFAULT_REQUEST_QUEUE_SIZE = 128
+
+# 1024 is practically large enough to contain WebSocket handshake lines.
+_MAX_MEMORIZED_LINES = 1024
+
+
+class _StandaloneConnection(object):
+ """Mimic mod_python mp_conn."""
+
+ def __init__(self, request_handler):
+ """Construct an instance.
+
+ Args:
+ request_handler: A WebSocketRequestHandler instance.
+ """
+
+ self._request_handler = request_handler
+
+ def get_local_addr(self):
+ """Getter to mimic mp_conn.local_addr."""
+
+ return (self._request_handler.server.server_name,
+ self._request_handler.server.server_port)
+ local_addr = property(get_local_addr)
+
+ def get_remote_addr(self):
+ """Getter to mimic mp_conn.remote_addr.
+
+ Setting the property in __init__ won't work because the request
+        handler is not yet initialized at that point."""
+
+ return self._request_handler.client_address
+ remote_addr = property(get_remote_addr)
+
+ def write(self, data):
+ """Mimic mp_conn.write()."""
+
+ return self._request_handler.wfile.write(data)
+
+ def read(self, length):
+ """Mimic mp_conn.read()."""
+
+ return self._request_handler.rfile.read(length)
+
+ def get_memorized_lines(self):
+ """Get memorized lines."""
+
+ return self._request_handler.rfile.get_memorized_lines()
+
+
+class _StandaloneRequest(object):
+ """Mimic mod_python request."""
+
+ def __init__(self, request_handler, use_tls):
+ """Construct an instance.
+
+ Args:
+ request_handler: A WebSocketRequestHandler instance.
+ """
+
+ self._logger = util.get_class_logger(self)
+
+ self._request_handler = request_handler
+ self.connection = _StandaloneConnection(request_handler)
+ self._use_tls = use_tls
+ self.headers_in = request_handler.headers
+
+ def get_uri(self):
+ """Getter to mimic request.uri."""
+
+ return self._request_handler.path
+ uri = property(get_uri)
+
+ def get_method(self):
+ """Getter to mimic request.method."""
+
+ return self._request_handler.command
+ method = property(get_method)
+
+ def is_https(self):
+ """Mimic request.is_https()."""
+
+ return self._use_tls
+
+ def _drain_received_data(self):
+ """Don't use this method from WebSocket handler. Drains unread data
+ in the receive buffer.
+ """
+
+ raw_socket = self._request_handler.connection
+ drained_data = util.drain_received_data(raw_socket)
+
+ if drained_data:
+ self._logger.debug(
+ 'Drained data following close frame: %r', drained_data)
+
+
+class _StandaloneSSLConnection(object):
+ """A wrapper class for OpenSSL.SSL.Connection to provide makefile method
+ which is not supported by the class.
+ """
+
+ def __init__(self, connection):
+ self._connection = connection
+
+ def __getattribute__(self, name):
+ if name in ('_connection', 'makefile'):
+ return object.__getattribute__(self, name)
+ return self._connection.__getattribute__(name)
+
+ def __setattr__(self, name, value):
+ if name in ('_connection', 'makefile'):
+ return object.__setattr__(self, name, value)
+ return self._connection.__setattr__(name, value)
+
+ def makefile(self, mode='r', bufsize=-1):
+ return socket._fileobject(self._connection, mode, bufsize)
+
+
+class WebSocketServer(SocketServer.ThreadingMixIn, BaseHTTPServer.HTTPServer):
+ """HTTPServer specialized for WebSocket."""
+
+ # Overrides SocketServer.ThreadingMixIn.daemon_threads
+ daemon_threads = True
+ # Overrides BaseHTTPServer.HTTPServer.allow_reuse_address
+ allow_reuse_address = True
+
+ def __init__(self, options):
+ """Override SocketServer.TCPServer.__init__ to set SSL enabled
+ socket object to self.socket before server_bind and server_activate,
+ if necessary.
+ """
+
+ self._logger = util.get_class_logger(self)
+
+ self.request_queue_size = options.request_queue_size
+ self.__ws_is_shut_down = threading.Event()
+ self.__ws_serving = False
+
+ SocketServer.BaseServer.__init__(
+ self, (options.server_host, options.port), WebSocketRequestHandler)
+
+        # Expose the options object to allow handler objects to access it. We name
+ # it with websocket_ prefix to avoid conflict.
+ self.websocket_server_options = options
+
+ self._create_sockets()
+ self.server_bind()
+ self.server_activate()
+
+ def _create_sockets(self):
+ self.server_name, self.server_port = self.server_address
+ self._sockets = []
+ if not self.server_name:
+            # On platforms that don't support IPv6, the first bind fails.
+            # On platforms that support IPv6
+            # - If it binds both IPv4 and IPv6 on a call with AF_INET6, the
+            #   first bind succeeds and the second fails (we'll see an 'Address
+            #   already in use' error).
+            # - If it binds only IPv6 on a call with AF_INET6, both calls are
+            #   expected to succeed, listening on both protocols.
+ addrinfo_array = [
+ (socket.AF_INET6, socket.SOCK_STREAM, '', '', ''),
+ (socket.AF_INET, socket.SOCK_STREAM, '', '', '')]
+ else:
+ addrinfo_array = socket.getaddrinfo(self.server_name,
+ self.server_port,
+ socket.AF_UNSPEC,
+ socket.SOCK_STREAM,
+ socket.IPPROTO_TCP)
+ for addrinfo in addrinfo_array:
+ self._logger.info('Create socket on: %r', addrinfo)
+ family, socktype, proto, canonname, sockaddr = addrinfo
+ try:
+ socket_ = socket.socket(family, socktype)
+ except Exception, e:
+ self._logger.info('Skip by failure: %r', e)
+ continue
+ if self.websocket_server_options.use_tls:
+ if _HAS_SSL:
+ if self.websocket_server_options.ca_certificate:
+ client_cert_ = ssl.CERT_REQUIRED
+ else:
+ client_cert_ = ssl.CERT_NONE
+ socket_ = ssl.wrap_socket(socket_,
+ keyfile=self.websocket_server_options.private_key,
+ certfile=self.websocket_server_options.certificate,
+ ssl_version=ssl.PROTOCOL_SSLv23,
+ ca_certs=self.websocket_server_options.ca_certificate,
+ cert_reqs=client_cert_)
+ if _HAS_OPEN_SSL:
+ ctx = OpenSSL.SSL.Context(OpenSSL.SSL.SSLv23_METHOD)
+ ctx.use_privatekey_file(
+ self.websocket_server_options.private_key)
+ ctx.use_certificate_file(
+ self.websocket_server_options.certificate)
+ socket_ = OpenSSL.SSL.Connection(ctx, socket_)
+ self._sockets.append((socket_, addrinfo))
+
+ def server_bind(self):
+ """Override SocketServer.TCPServer.server_bind to enable multiple
+ sockets bind.
+ """
+
+ failed_sockets = []
+
+ for socketinfo in self._sockets:
+ socket_, addrinfo = socketinfo
+ self._logger.info('Bind on: %r', addrinfo)
+ if self.allow_reuse_address:
+ socket_.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
+ try:
+ socket_.bind(self.server_address)
+ except Exception, e:
+ self._logger.info('Skip by failure: %r', e)
+ socket_.close()
+ failed_sockets.append(socketinfo)
+
+ for socketinfo in failed_sockets:
+ self._sockets.remove(socketinfo)
+
+ def server_activate(self):
+ """Override SocketServer.TCPServer.server_activate to enable multiple
+ sockets listen.
+ """
+
+ failed_sockets = []
+
+ for socketinfo in self._sockets:
+ socket_, addrinfo = socketinfo
+ self._logger.info('Listen on: %r', addrinfo)
+ try:
+ socket_.listen(self.request_queue_size)
+ except Exception, e:
+ self._logger.info('Skip by failure: %r', e)
+ socket_.close()
+ failed_sockets.append(socketinfo)
+
+ for socketinfo in failed_sockets:
+ self._sockets.remove(socketinfo)
+
+ def server_close(self):
+ """Override SocketServer.TCPServer.server_close to enable multiple
+ sockets close.
+ """
+
+ for socketinfo in self._sockets:
+ socket_, addrinfo = socketinfo
+ self._logger.info('Close on: %r', addrinfo)
+ socket_.close()
+
+ def fileno(self):
+ """Override SocketServer.TCPServer.fileno."""
+
+ self._logger.critical('Not supported: fileno')
+ return self._sockets[0][0].fileno()
+
+    def handle_error(self, request, client_address):
+ """Override SocketServer.handle_error."""
+
+ self._logger.error(
+ 'Exception in processing request from: %r\n%s',
+ client_address,
+ util.get_stack_trace())
+ # Note: client_address is a tuple.
+
+ def get_request(self):
+ """Override TCPServer.get_request to wrap OpenSSL.SSL.Connection
+        object with _StandaloneSSLConnection to provide the makefile method. We
+        cannot substitute OpenSSL.SSL.Connection.makefile since it's a read-only
+        attribute.
+ """
+
+ accepted_socket, client_address = self.socket.accept()
+ if self.websocket_server_options.use_tls and _HAS_OPEN_SSL:
+ accepted_socket = _StandaloneSSLConnection(accepted_socket)
+ return accepted_socket, client_address
+
+ def serve_forever(self, poll_interval=0.5):
+ """Override SocketServer.BaseServer.serve_forever."""
+
+ self.__ws_serving = True
+ self.__ws_is_shut_down.clear()
+ handle_request = self.handle_request
+ if hasattr(self, '_handle_request_noblock'):
+ handle_request = self._handle_request_noblock
+ else:
+ self._logger.warning('Fallback to blocking request handler')
+ try:
+ while self.__ws_serving:
+ r, w, e = select.select(
+ [socket_[0] for socket_ in self._sockets],
+ [], [], poll_interval)
+ for socket_ in r:
+ self.socket = socket_
+ handle_request()
+ self.socket = None
+ finally:
+ self.__ws_is_shut_down.set()
+
+ def shutdown(self):
+ """Override SocketServer.BaseServer.shutdown."""
+
+ self.__ws_serving = False
+ self.__ws_is_shut_down.wait()
+
+
+class WebSocketRequestHandler(CGIHTTPServer.CGIHTTPRequestHandler):
+ """CGIHTTPRequestHandler specialized for WebSocket."""
+
+ # Use httplib.HTTPMessage instead of mimetools.Message.
+ MessageClass = httplib.HTTPMessage
+
+ def setup(self):
+ """Override SocketServer.StreamRequestHandler.setup to wrap rfile
+ with MemorizingFile.
+
+ This method will be called by BaseRequestHandler's constructor
+ before calling BaseHTTPRequestHandler.handle.
+ BaseHTTPRequestHandler.handle will call
+ BaseHTTPRequestHandler.handle_one_request and it will call
+ WebSocketRequestHandler.parse_request.
+ """
+
+ # Call superclass's setup to prepare rfile, wfile, etc. See setup
+ # definition on the root class SocketServer.StreamRequestHandler to
+ # understand what this does.
+ CGIHTTPServer.CGIHTTPRequestHandler.setup(self)
+
+ self.rfile = memorizingfile.MemorizingFile(
+ self.rfile,
+ max_memorized_lines=_MAX_MEMORIZED_LINES)
+
+ def __init__(self, request, client_address, server):
+ self._logger = util.get_class_logger(self)
+
+ self._options = server.websocket_server_options
+
+        # Overrides CGIHTTPRequestHandler.cgi_directories.
+ self.cgi_directories = self._options.cgi_directories
+ # Replace CGIHTTPRequestHandler.is_executable method.
+ if self._options.is_executable_method is not None:
+ self.is_executable = self._options.is_executable_method
+
+ # This actually calls BaseRequestHandler.__init__.
+ CGIHTTPServer.CGIHTTPRequestHandler.__init__(
+ self, request, client_address, server)
+
+ def parse_request(self):
+ """Override BaseHTTPServer.BaseHTTPRequestHandler.parse_request.
+
+ Return True to continue processing for HTTP(S), False otherwise.
+
+ See BaseHTTPRequestHandler.handle_one_request method which calls
+ this method to understand how the return value will be handled.
+ """
+
+ # We hook parse_request method, but also call the original
+ # CGIHTTPRequestHandler.parse_request since when we return False,
+ # CGIHTTPRequestHandler.handle_one_request continues processing and
+ # it needs variables set by CGIHTTPRequestHandler.parse_request.
+ #
+ # Variables set by this method will be also used by WebSocket request
+ # handling (self.path, self.command, self.requestline, etc. See also
+ # how _StandaloneRequest's members are implemented using these
+ # attributes).
+ if not CGIHTTPServer.CGIHTTPRequestHandler.parse_request(self):
+ return False
+ host, port, resource = http_header_util.parse_uri(self.path)
+ if resource is None:
+ self._logger.info('Invalid URI: %r', self.path)
+ self._logger.info('Fallback to CGIHTTPRequestHandler')
+ return True
+ server_options = self.server.websocket_server_options
+ if host is not None:
+ validation_host = server_options.validation_host
+ if validation_host is not None and host != validation_host:
+ self._logger.info('Invalid host: %r (expected: %r)',
+ host,
+ validation_host)
+ self._logger.info('Fallback to CGIHTTPRequestHandler')
+ return True
+ if port is not None:
+ validation_port = server_options.validation_port
+ if validation_port is not None and port != validation_port:
+ self._logger.info('Invalid port: %r (expected: %r)',
+ port,
+ validation_port)
+ self._logger.info('Fallback to CGIHTTPRequestHandler')
+ return True
+ self.path = resource
+
+ request = _StandaloneRequest(self, self._options.use_tls)
+
+ try:
+ # Fallback to default http handler for request paths for which
+ # we don't have request handlers.
+ if not self._options.dispatcher.get_handler_suite(self.path):
+ self._logger.info('No handler for resource: %r',
+ self.path)
+ self._logger.info('Fallback to CGIHTTPRequestHandler')
+ return True
+ except dispatch.DispatchException, e:
+ self._logger.info('%s', e)
+ self.send_error(e.status)
+ return False
+
+ # If any Exceptions without except clause setup (including
+ # DispatchException) is raised below this point, it will be caught
+ # and logged by WebSocketServer.
+
+ try:
+ try:
+ handshake.do_handshake(
+ request,
+ self._options.dispatcher,
+ allowDraft75=self._options.allow_draft75,
+ strict=self._options.strict)
+ except handshake.VersionException, e:
+ self._logger.info('%s', e)
+ self.send_response(common.HTTP_STATUS_BAD_REQUEST)
+ self.send_header(common.SEC_WEBSOCKET_VERSION_HEADER,
+ e.supported_versions)
+ self.end_headers()
+ return False
+ except handshake.HandshakeException, e:
+ # Handshake for ws(s) failed.
+ self._logger.info('%s', e)
+ self.send_error(e.status)
+ return False
+
+ request._dispatcher = self._options.dispatcher
+ self._options.dispatcher.transfer_data(request)
+ except handshake.AbortedByUserException, e:
+ self._logger.info('%s', e)
+ return False
+
+ def log_request(self, code='-', size='-'):
+ """Override BaseHTTPServer.log_request."""
+
+ self._logger.info('"%s" %s %s',
+ self.requestline, str(code), str(size))
+
+ def log_error(self, *args):
+ """Override BaseHTTPServer.log_error."""
+
+        # Despite the name, this method is for warnings rather than for errors.
+ # For example, HTTP status code is logged by this method.
+ self._logger.warning('%s - %s',
+ self.address_string(),
+ args[0] % args[1:])
+
+ def is_cgi(self):
+ """Test whether self.path corresponds to a CGI script.
+
+        Add an extra check that self.path doesn't contain '..'.
+        Also check whether the file is executable.
+        If the file is not executable, it is handled as a static file or directory
+        rather than as a CGI script.
+ """
+
+ if CGIHTTPServer.CGIHTTPRequestHandler.is_cgi(self):
+ if '..' in self.path:
+ return False
+ # strip query parameter from request path
+ resource_name = self.path.split('?', 2)[0]
+ # convert resource_name into real path name in filesystem.
+ scriptfile = self.translate_path(resource_name)
+ if not os.path.isfile(scriptfile):
+ return False
+ if not self.is_executable(scriptfile):
+ return False
+ return True
+ return False
+
+
+def _get_logger_from_class(c):
+ return logging.getLogger('%s.%s' % (c.__module__, c.__name__))
+
+
+def _configure_logging(options):
+ logging.addLevelName(common.LOGLEVEL_FINE, 'FINE')
+
+ logger = logging.getLogger()
+ logger.setLevel(logging.getLevelName(options.log_level.upper()))
+ if options.log_file:
+ handler = logging.handlers.RotatingFileHandler(
+ options.log_file, 'a', options.log_max, options.log_count)
+ else:
+ handler = logging.StreamHandler()
+ formatter = logging.Formatter(
+ '[%(asctime)s] [%(levelname)s] %(name)s: %(message)s')
+ handler.setFormatter(formatter)
+ logger.addHandler(handler)
+
+ deflate_log_level_name = logging.getLevelName(
+ options.deflate_log_level.upper())
+ _get_logger_from_class(util._Deflater).setLevel(
+ deflate_log_level_name)
+ _get_logger_from_class(util._Inflater).setLevel(
+ deflate_log_level_name)
+
+
+def _alias_handlers(dispatcher, websock_handlers_map_file):
+ """Set aliases specified in websock_handler_map_file in dispatcher.
+
+ Args:
+ dispatcher: dispatch.Dispatcher instance
+ websock_handler_map_file: alias map file
+ """
+
+ fp = open(websock_handlers_map_file)
+ try:
+ for line in fp:
+ if line[0] == '#' or line.isspace():
+ continue
+ m = re.match('(\S+)\s+(\S+)', line)
+ if not m:
+ logging.warning('Wrong format in map file:' + line)
+ continue
+ try:
+ dispatcher.add_resource_path_alias(
+ m.group(1), m.group(2))
+ except dispatch.DispatchException, e:
+ logging.error(str(e))
+ finally:
+ fp.close()
+
+
+def _build_option_parser():
+ parser = optparse.OptionParser()
+
+ parser.add_option('--config', dest='config_file', type='string',
+ default=None,
+ help=('Path to configuration file. See the file comment '
+ 'at the top of this file for the configuration '
+ 'file format'))
+ parser.add_option('-H', '--server-host', '--server_host',
+ dest='server_host',
+ default='',
+ help='server hostname to listen to')
+ parser.add_option('-V', '--validation-host', '--validation_host',
+ dest='validation_host',
+ default=None,
+ help='server hostname to validate in absolute path.')
+ parser.add_option('-p', '--port', dest='port', type='int',
+ default=common.DEFAULT_WEB_SOCKET_PORT,
+ help='port to listen to')
+ parser.add_option('-P', '--validation-port', '--validation_port',
+ dest='validation_port', type='int',
+ default=None,
+ help='server port to validate in absolute path.')
+ parser.add_option('-w', '--websock-handlers', '--websock_handlers',
+ dest='websock_handlers',
+ default='.',
+ help='WebSocket handlers root directory.')
+ parser.add_option('-m', '--websock-handlers-map-file',
+ '--websock_handlers_map_file',
+ dest='websock_handlers_map_file',
+ default=None,
+ help=('WebSocket handlers map file. '
+ 'Each line consists of alias_resource_path and '
+ 'existing_resource_path, separated by spaces.'))
+ parser.add_option('-s', '--scan-dir', '--scan_dir', dest='scan_dir',
+ default=None,
+ help=('WebSocket handlers scan directory. '
+ 'Must be a directory under websock_handlers.'))
+ parser.add_option('--allow-handlers-outside-root-dir',
+ '--allow_handlers_outside_root_dir',
+ dest='allow_handlers_outside_root_dir',
+ action='store_true',
+ default=False,
+ help=('Scans WebSocket handlers even if their canonical '
+ 'path is not under websock_handlers.'))
+ parser.add_option('-d', '--document-root', '--document_root',
+ dest='document_root', default='.',
+ help='Document root directory.')
+ parser.add_option('-x', '--cgi-paths', '--cgi_paths', dest='cgi_paths',
+ default=None,
+                      help=('CGI paths relative to document_root. '
+                            'Comma-separated. (e.g. -x /cgi,/htbin) '
+ 'Files under document_root/cgi_path are handled '
+ 'as CGI programs. Must be executable.'))
+ parser.add_option('-t', '--tls', dest='use_tls', action='store_true',
+ default=False, help='use TLS (wss://)')
+ parser.add_option('-k', '--private-key', '--private_key',
+ dest='private_key',
+ default='', help='TLS private key file.')
+ parser.add_option('-c', '--certificate', dest='certificate',
+ default='', help='TLS certificate file.')
+ parser.add_option('--ca-certificate', dest='ca_certificate', default='',
+ help=('TLS CA certificate file for client '
+ 'authentication.'))
+ parser.add_option('-l', '--log-file', '--log_file', dest='log_file',
+ default='', help='Log file.')
+ # Custom log level:
+ # - FINE: Prints status of each frame processing step
+ parser.add_option('--log-level', '--log_level', type='choice',
+ dest='log_level', default='warn',
+ choices=['fine',
+ 'debug', 'info', 'warning', 'warn', 'error',
+ 'critical'],
+ help='Log level.')
+ parser.add_option('--deflate-log-level', '--deflate_log_level',
+ type='choice',
+ dest='deflate_log_level', default='warn',
+ choices=['debug', 'info', 'warning', 'warn', 'error',
+ 'critical'],
+ help='Log level for _Deflater and _Inflater.')
+ parser.add_option('--thread-monitor-interval-in-sec',
+ '--thread_monitor_interval_in_sec',
+ dest='thread_monitor_interval_in_sec',
+ type='int', default=-1,
+                      help=('If a positive integer is specified, run a thread '
+                            'monitor to show the status of server threads '
+                            'periodically at the specified interval in '
+                            'seconds. If a non-positive integer is specified, '
+                            'disable the thread monitor.'))
+ parser.add_option('--log-max', '--log_max', dest='log_max', type='int',
+ default=_DEFAULT_LOG_MAX_BYTES,
+ help='Log maximum bytes')
+ parser.add_option('--log-count', '--log_count', dest='log_count',
+ type='int', default=_DEFAULT_LOG_BACKUP_COUNT,
+ help='Log backup count')
+ parser.add_option('--allow-draft75', dest='allow_draft75',
+ action='store_true', default=False,
+ help='Allow draft 75 handshake')
+ parser.add_option('--strict', dest='strict', action='store_true',
+ default=False, help='Strictly check handshake request')
+ parser.add_option('-q', '--queue', dest='request_queue_size', type='int',
+ default=_DEFAULT_REQUEST_QUEUE_SIZE,
+ help='request queue size')
+
+ return parser
+
+
+class ThreadMonitor(threading.Thread):
+ daemon = True
+
+ def __init__(self, interval_in_sec):
+ threading.Thread.__init__(self, name='ThreadMonitor')
+
+ self._logger = util.get_class_logger(self)
+
+ self._interval_in_sec = interval_in_sec
+
+ def run(self):
+ while True:
+ thread_name_list = []
+ for thread in threading.enumerate():
+ thread_name_list.append(thread.name)
+ self._logger.info(
+ "%d active threads: %s",
+ threading.active_count(),
+ ', '.join(thread_name_list))
+ time.sleep(self._interval_in_sec)
+
+
+def _parse_args_and_config(args):
+ parser = _build_option_parser()
+
+ # First, parse options without configuration file.
+ temporary_options, temporary_args = parser.parse_args(args=args)
+ if temporary_args:
+ logging.critical(
+ 'Unrecognized positional arguments: %r', temporary_args)
+ sys.exit(1)
+
+ if temporary_options.config_file:
+ try:
+ config_fp = open(temporary_options.config_file, 'r')
+ except IOError, e:
+ logging.critical(
+ 'Failed to open configuration file %r: %r',
+ temporary_options.config_file,
+ e)
+ sys.exit(1)
+
+ config_parser = ConfigParser.SafeConfigParser()
+ config_parser.readfp(config_fp)
+ config_fp.close()
+
+ args_from_config = []
+ for name, value in config_parser.items('pywebsocket'):
+ args_from_config.append('--' + name)
+ args_from_config.append(value)
+ if args is None:
+ args = args_from_config
+ else:
+ args = args_from_config + args
+ return parser.parse_args(args=args)
+ else:
+ return temporary_options, temporary_args
+
+
+def _main(args=None):
+ options, args = _parse_args_and_config(args=args)
+
+ os.chdir(options.document_root)
+
+ _configure_logging(options)
+
+    # TODO(tyoshino): Clean up initialization of CGI-related values. Move some
+    # of the code here into the WebSocketRequestHandler class if that is better.
+ options.cgi_directories = []
+ options.is_executable_method = None
+ if options.cgi_paths:
+ options.cgi_directories = options.cgi_paths.split(',')
+ if sys.platform in ('cygwin', 'win32'):
+ cygwin_path = None
+            # For Win32 Python, CYGWIN_PATH is expected to be set to a
+            # directory containing the cygwin binaries.
+            # For example, websocket_server.py in Chromium sets CYGWIN_PATH to
+            # the full path of third_party/cygwin/bin.
+ if 'CYGWIN_PATH' in os.environ:
+ cygwin_path = os.environ['CYGWIN_PATH']
+ util.wrap_popen3_for_win(cygwin_path)
+
+ def __check_script(scriptpath):
+ return util.get_script_interp(scriptpath, cygwin_path)
+
+ options.is_executable_method = __check_script
+
+ if options.use_tls:
+ if not (_HAS_SSL or _HAS_OPEN_SSL):
+ logging.critical('TLS support requires ssl or pyOpenSSL module.')
+ sys.exit(1)
+ if not options.private_key or not options.certificate:
+ logging.critical(
+ 'To use TLS, specify private_key and certificate.')
+ sys.exit(1)
+
+ if options.ca_certificate:
+ if not options.use_tls:
+ logging.critical('TLS must be enabled for client authentication.')
+ sys.exit(1)
+ if not _HAS_SSL:
+            logging.critical('Client authentication requires ssl module.')
+            sys.exit(1)
+
+ if not options.scan_dir:
+ options.scan_dir = options.websock_handlers
+
+ try:
+ if options.thread_monitor_interval_in_sec > 0:
+ # Run a thread monitor to show the status of server threads for
+ # debugging.
+ ThreadMonitor(options.thread_monitor_interval_in_sec).start()
+
+ # Share a Dispatcher among request handlers to save time for
+ # instantiation. Dispatcher can be shared because it is thread-safe.
+ options.dispatcher = dispatch.Dispatcher(
+ options.websock_handlers,
+ options.scan_dir,
+ options.allow_handlers_outside_root_dir)
+ if options.websock_handlers_map_file:
+ _alias_handlers(options.dispatcher,
+ options.websock_handlers_map_file)
+ warnings = options.dispatcher.source_warnings()
+ if warnings:
+ for warning in warnings:
+ logging.warning('mod_pywebsocket: %s' % warning)
+
+ server = WebSocketServer(options)
+ server.serve_forever()
+ except Exception, e:
+ logging.critical('mod_pywebsocket: %s' % e)
+ logging.critical('mod_pywebsocket: %s' % util.get_stack_trace())
+ sys.exit(1)
+
+
+if __name__ == '__main__':
+ _main(sys.argv[1:])
+
+
+# vi:sts=4 sw=4 et
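
Note on the configuration file handling in _parse_args_and_config above: the
function reads an INI-style file, takes every entry of its [pywebsocket]
section, and prepends each one to the command line as a '--name value' pair
before parsing again, so explicit command-line flags override the file. A
minimal sketch of that mapping, using made-up values (the section name and the
long option names come from the code above; the config contents are purely
illustrative):

    import ConfigParser
    import StringIO

    # Hypothetical configuration; each key matches a long option name.
    sample_config = ('[pywebsocket]\n'
                     'port=8880\n'
                     'document_root=/var/www\n'
                     'log_level=info\n')

    config_parser = ConfigParser.SafeConfigParser()
    config_parser.readfp(StringIO.StringIO(sample_config))

    # Mirrors the loop in _parse_args_and_config: each (name, value) pair
    # becomes a '--name value' argument placed before the real command line.
    args_from_config = []
    for name, value in config_parser.items('pywebsocket'):
        args_from_config.append('--' + name)
        args_from_config.append(value)

    print args_from_config
    # e.g. ['--port', '8880', '--document_root', '/var/www',
    #       '--log_level', 'info'] (ordering may vary)
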
diff --git a/module/lib/mod_pywebsocket/stream.py b/module/lib/mod_pywebsocket/stream.py
new file mode 100644
index 000000000..d051eee20
--- /dev/null
+++ b/module/lib/mod_pywebsocket/stream.py
@@ -0,0 +1,56 @@
+# Copyright 2011, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+"""This file exports public symbols.
+"""
+
+
+from mod_pywebsocket._stream_base import BadOperationException
+from mod_pywebsocket._stream_base import ConnectionTerminatedException
+from mod_pywebsocket._stream_base import InvalidFrameException
+from mod_pywebsocket._stream_base import InvalidUTF8Exception
+from mod_pywebsocket._stream_base import UnsupportedFrameException
+from mod_pywebsocket._stream_hixie75 import StreamHixie75
+from mod_pywebsocket._stream_hybi import Frame
+from mod_pywebsocket._stream_hybi import Stream
+from mod_pywebsocket._stream_hybi import StreamOptions
+
+# These functions are intended for WebSocket client developers who want their
+# implementations to receive deliberately broken data in tests.
+from mod_pywebsocket._stream_hybi import create_close_frame
+from mod_pywebsocket._stream_hybi import create_header
+from mod_pywebsocket._stream_hybi import create_length_header
+from mod_pywebsocket._stream_hybi import create_ping_frame
+from mod_pywebsocket._stream_hybi import create_pong_frame
+from mod_pywebsocket._stream_hybi import create_binary_frame
+from mod_pywebsocket._stream_hybi import create_text_frame
+
+
+# vi:sts=4 sw=4 et
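
The frame-construction helpers re-exported above (create_text_frame,
create_close_frame, and friends) are what a client test harness would import
to feed hand-built or deliberately malformed frames to an implementation. A
rough, hypothetical sketch follows; the import path and function names come
from this file, but the call arguments are assumptions, since the helpers'
signatures live in _stream_hybi.py and are not shown here:

    from mod_pywebsocket.stream import create_close_frame, create_text_frame

    # Assumed usage: build raw frame bytes that a test can write straight to
    # the socket feeding the client under test.
    text_frame_bytes = create_text_frame('hello')
    close_frame_bytes = create_close_frame('')
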
diff --git a/module/lib/mod_pywebsocket/util.py b/module/lib/mod_pywebsocket/util.py
new file mode 100644
index 000000000..d60b53f75
--- /dev/null
+++ b/module/lib/mod_pywebsocket/util.py
@@ -0,0 +1,466 @@
+# Copyright 2011, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+"""WebSocket utilities.
+"""
+
+import errno
+
+# Import hash classes from the module available and recommended for each Python
+# version and re-export those symbols. Use the sha and md5 modules in Python
+# 2.4, and the hashlib module in Python 2.6.
+try:
+ import hashlib
+ md5_hash = hashlib.md5
+ sha1_hash = hashlib.sha1
+except ImportError:
+ import md5
+ import sha
+ md5_hash = md5.md5
+ sha1_hash = sha.sha
+
+import StringIO
+import logging
+import os
+import re
+import socket
+import traceback
+import zlib
+
+
+def get_stack_trace():
+ """Get the current stack trace as string.
+
+ This is needed to support Python 2.3.
+ TODO: Remove this when we only support Python 2.4 and above.
+ Use traceback.format_exc instead.
+ """
+
+ out = StringIO.StringIO()
+ traceback.print_exc(file=out)
+ return out.getvalue()
+
+
+def prepend_message_to_exception(message, exc):
+ """Prepend message to the exception."""
+
+ exc.args = (message + str(exc),)
+ return
+
+
+def __translate_interp(interp, cygwin_path):
+ """Translate interp program path for Win32 python to run cygwin program
+ (e.g. perl). Note that it doesn't support path that contains space,
+ which is typically true for Unix, where #!-script is written.
+ For Win32 python, cygwin_path is a directory of cygwin binaries.
+
+ Args:
+ interp: interp command line
+ cygwin_path: directory name of cygwin binary, or None
+ Returns:
+ translated interp command line.
+ """
+ if not cygwin_path:
+ return interp
+ m = re.match('^[^ ]*/([^ ]+)( .*)?', interp)
+ if m:
+ cmd = os.path.join(cygwin_path, m.group(1))
+ return cmd + m.group(2)
+ return interp
+
+
+def get_script_interp(script_path, cygwin_path=None):
+ """Gets #!-interpreter command line from the script.
+
+    It also fixes the command path. Cygwin Python (e.g. in WebKit) can run
+    "/usr/bin/perl -wT hello.pl" directly; Win32 Python (e.g. in Chromium)
+    cannot, so "/usr/bin/perl" is rewritten to "<cygwin_path>\perl.exe".
+
+ Args:
+ script_path: pathname of the script
+ cygwin_path: directory name of cygwin binary, or None
+ Returns:
+ #!-interpreter command line, or None if it is not #!-script.
+ """
+ fp = open(script_path)
+ line = fp.readline()
+ fp.close()
+ m = re.match('^#!(.*)', line)
+ if m:
+ return __translate_interp(m.group(1), cygwin_path)
+ return None
+
+
+def wrap_popen3_for_win(cygwin_path):
+ """Wrap popen3 to support #!-script on Windows.
+
+ Args:
+        cygwin_path: path of the cygwin binaries if the command path needs to
+            be translated. None if no translation is required.
+ """
+
+ __orig_popen3 = os.popen3
+
+ def __wrap_popen3(cmd, mode='t', bufsize=-1):
+ cmdline = cmd.split(' ')
+ interp = get_script_interp(cmdline[0], cygwin_path)
+ if interp:
+ cmd = interp + ' ' + cmd
+ return __orig_popen3(cmd, mode, bufsize)
+
+ os.popen3 = __wrap_popen3
+
+
+def hexify(s):
+ return ' '.join(map(lambda x: '%02x' % ord(x), s))
+
+
+def get_class_logger(o):
+ return logging.getLogger(
+ '%s.%s' % (o.__class__.__module__, o.__class__.__name__))
+
+
+class NoopMasker(object):
+ """A masking object that has the same interface as RepeatedXorMasker but
+ just returns the string passed in without making any change.
+ """
+
+ def __init__(self):
+ pass
+
+ def mask(self, s):
+ return s
+
+
+class DeflateRequest(object):
+ """A wrapper class for request object to intercept send and recv to perform
+ deflate compression and decompression transparently.
+ """
+
+ def __init__(self, request):
+ self._request = request
+ self.connection = DeflateConnection(request.connection)
+
+ def __getattribute__(self, name):
+ if name in ('_request', 'connection'):
+ return object.__getattribute__(self, name)
+ return self._request.__getattribute__(name)
+
+ def __setattr__(self, name, value):
+ if name in ('_request', 'connection'):
+ return object.__setattr__(self, name, value)
+ return self._request.__setattr__(name, value)
+
+
+# By making the wbits option negative, we can suppress the CMF/FLG (2 octets)
+# and ADLER32 (4 octets) fields of zlib so that we can use the zlib module as a
+# plain deflate library. DICTID won't be added as long as we don't set a
+# dictionary. An LZ77 window of 32K is used for both compression and
+# decompression. For decompression, 32K covers any window size. For
+# compression, we use 32K, so receivers must support 32K.
+#
+# The compression level is Z_DEFAULT_COMPRESSION. The level doesn't have to
+# match for decoding.
+#
+# See zconf.h, deflate.c, inflate.c of the zlib library, and zlibmodule.c of
+# Python. See also RFC 1950 (ZLIB 3.3).
+
+
+class _Deflater(object):
+
+ def __init__(self, window_bits):
+ self._logger = get_class_logger(self)
+
+ self._compress = zlib.compressobj(
+ zlib.Z_DEFAULT_COMPRESSION, zlib.DEFLATED, -window_bits)
+
+ def compress_and_flush(self, bytes):
+ compressed_bytes = self._compress.compress(bytes)
+ compressed_bytes += self._compress.flush(zlib.Z_SYNC_FLUSH)
+ self._logger.debug('Compress input %r', bytes)
+ self._logger.debug('Compress result %r', compressed_bytes)
+ return compressed_bytes
+
+
+class _Inflater(object):
+
+ def __init__(self):
+ self._logger = get_class_logger(self)
+
+ self._unconsumed = ''
+
+ self.reset()
+
+ def decompress(self, size):
+ if not (size == -1 or size > 0):
+ raise Exception('size must be -1 or positive')
+
+ data = ''
+
+ while True:
+ if size == -1:
+ data += self._decompress.decompress(self._unconsumed)
+                # See Python bug http://bugs.python.org/issue12050 to
+                # understand why the same code cannot be used to update
+                # self._unconsumed both here and in the else block.
+ self._unconsumed = ''
+ else:
+ data += self._decompress.decompress(
+ self._unconsumed, size - len(data))
+ self._unconsumed = self._decompress.unconsumed_tail
+ if self._decompress.unused_data:
+                # Encountered the final block (i.e. a block with BFINAL = 1)
+                # and found the start of a new stream (unused_data). We cannot
+                # reuse the same zlib.Decompress object for the new stream, so
+                # create a new Decompress object to decompress it.
+ #
+ # It's fine to ignore unconsumed_tail if unused_data is not
+ # empty.
+ self._unconsumed = self._decompress.unused_data
+ self.reset()
+ if size >= 0 and len(data) == size:
+ # data is filled. Don't call decompress again.
+ break
+ else:
+ # Re-invoke Decompress.decompress to try to decompress all
+ # available bytes before invoking read which blocks until
+ # any new byte is available.
+ continue
+ else:
+                # Here, since unused_data is empty, the requested number of
+                # bytes is already in data even if unconsumed_tail is not
+                # empty, so there is no need to "continue" here.
+ break
+
+ if data:
+ self._logger.debug('Decompressed %r', data)
+ return data
+
+ def append(self, data):
+ self._logger.debug('Appended %r', data)
+ self._unconsumed += data
+
+ def reset(self):
+ self._logger.debug('Reset')
+ self._decompress = zlib.decompressobj(-zlib.MAX_WBITS)
+
+
+# Compresses/decompresses given octets using the method introduced in RFC1979.
+
+
+class _RFC1979Deflater(object):
+ """A compressor class that applies DEFLATE to given byte sequence and
+ flushes using the algorithm described in the RFC1979 section 2.1.
+ """
+
+ def __init__(self, window_bits, no_context_takeover):
+ self._deflater = None
+ if window_bits is None:
+ window_bits = zlib.MAX_WBITS
+ self._window_bits = window_bits
+ self._no_context_takeover = no_context_takeover
+
+ def filter(self, bytes):
+ if self._deflater is None or self._no_context_takeover:
+ self._deflater = _Deflater(self._window_bits)
+
+        # Strip the last 4 octets, which are the LEN and NLEN fields of the
+        # empty non-compressed block added by Z_SYNC_FLUSH.
+ return self._deflater.compress_and_flush(bytes)[:-4]
+
+
+class _RFC1979Inflater(object):
+ """A decompressor class for byte sequence compressed and flushed following
+ the algorithm described in the RFC1979 section 2.1.
+ """
+
+ def __init__(self):
+ self._inflater = _Inflater()
+
+ def filter(self, bytes):
+        # Restore the stripped LEN and NLEN fields of the non-compressed block
+        # added for Z_SYNC_FLUSH.
+ self._inflater.append(bytes + '\x00\x00\xff\xff')
+ return self._inflater.decompress(-1)
+
+
+class DeflateSocket(object):
+ """A wrapper class for socket object to intercept send and recv to perform
+ deflate compression and decompression transparently.
+ """
+
+ # Size of the buffer passed to recv to receive compressed data.
+ _RECV_SIZE = 4096
+
+ def __init__(self, socket):
+ self._socket = socket
+
+ self._logger = get_class_logger(self)
+
+ self._deflater = _Deflater(zlib.MAX_WBITS)
+ self._inflater = _Inflater()
+
+ def recv(self, size):
+ """Receives data from the socket specified on the construction up
+ to the specified size. Once any data is available, returns it even
+ if it's smaller than the specified size.
+ """
+
+ # TODO(tyoshino): Allow call with size=0. It should block until any
+ # decompressed data is available.
+ if size <= 0:
+ raise Exception('Non-positive size passed')
+ while True:
+ data = self._inflater.decompress(size)
+ if len(data) != 0:
+ return data
+
+ read_data = self._socket.recv(DeflateSocket._RECV_SIZE)
+ if not read_data:
+ return ''
+ self._inflater.append(read_data)
+
+ def sendall(self, bytes):
+ self.send(bytes)
+
+ def send(self, bytes):
+ self._socket.sendall(self._deflater.compress_and_flush(bytes))
+ return len(bytes)
+
+
+class DeflateConnection(object):
+ """A wrapper class for request object to intercept write and read to
+ perform deflate compression and decompression transparently.
+ """
+
+ def __init__(self, connection):
+ self._connection = connection
+
+ self._logger = get_class_logger(self)
+
+ self._deflater = _Deflater(zlib.MAX_WBITS)
+ self._inflater = _Inflater()
+
+ def get_remote_addr(self):
+ return self._connection.remote_addr
+ remote_addr = property(get_remote_addr)
+
+ def put_bytes(self, bytes):
+ self.write(bytes)
+
+ def read(self, size=-1):
+ """Reads at most size bytes. Blocks until there's at least one byte
+ available.
+ """
+
+ # TODO(tyoshino): Allow call with size=0.
+ if not (size == -1 or size > 0):
+ raise Exception('size must be -1 or positive')
+
+ data = ''
+ while True:
+ if size == -1:
+ data += self._inflater.decompress(-1)
+ else:
+ data += self._inflater.decompress(size - len(data))
+
+ if size >= 0 and len(data) != 0:
+ break
+
+ # TODO(tyoshino): Make this read efficient by some workaround.
+ #
+            # In mod_python 3.0.3 and earlier, read blocks until length bytes
+            # have been read. We don't know the exact size to read while using
+            # deflate, so read byte-by-byte.
+            #
+            # _StandaloneRequest.read, which ultimately performs
+            # socket._fileobject.read, also blocks until length bytes have
+            # been read.
+ read_data = self._connection.read(1)
+ if not read_data:
+ break
+ self._inflater.append(read_data)
+ return data
+
+ def write(self, bytes):
+ self._connection.write(self._deflater.compress_and_flush(bytes))
+
+
+def _is_ewouldblock_errno(error_number):
+ """Returns True iff error_number indicates that receive operation would
+ block. To make this portable, we check availability of errno and then
+ compare them.
+ """
+
+ for error_name in ['WSAEWOULDBLOCK', 'EWOULDBLOCK', 'EAGAIN']:
+ if (error_name in dir(errno) and
+ error_number == getattr(errno, error_name)):
+ return True
+ return False
+
+
+def drain_received_data(raw_socket):
+ # Set the socket non-blocking.
+ original_timeout = raw_socket.gettimeout()
+ raw_socket.settimeout(0.0)
+
+ drained_data = []
+
+ # Drain until the socket is closed or no data is immediately
+ # available for read.
+ while True:
+ try:
+ data = raw_socket.recv(1)
+ if not data:
+ break
+ drained_data.append(data)
+ except socket.error, e:
+            # e can be either a pair (errno, string) or just a string (or
+            # something else) telling what went wrong. We suppress only
+            # the errors that indicate that the socket would block. Those
+            # exceptions can be parsed as a pair (errno, string).
+ try:
+ error_number, message = e
+ except:
+ # Failed to parse socket.error.
+ raise e
+
+ if _is_ewouldblock_errno(error_number):
+ break
+ else:
+ raise e
+
+ # Rollback timeout value.
+ raw_socket.settimeout(original_timeout)
+
+ return ''.join(drained_data)
+
+
+# vi:sts=4 sw=4 et
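
The _RFC1979Deflater/_RFC1979Inflater pair above boils down to a standard zlib
idiom: compress with negative wbits to get a raw deflate stream, flush with
Z_SYNC_FLUSH, strip the trailing empty stored block (00 00 ff ff), and
re-append that block before inflating. A minimal standalone sketch of the
round trip, using only the zlib module and written in the same Python 2 style
as the file above (not part of pywebsocket itself):

    import zlib

    # Negative wbits yields a raw deflate stream: no zlib header, no ADLER32.
    compressor = zlib.compressobj(
        zlib.Z_DEFAULT_COMPRESSION, zlib.DEFLATED, -zlib.MAX_WBITS)
    decompressor = zlib.decompressobj(-zlib.MAX_WBITS)

    message = 'Hello, WebSocket!'

    # Compress and flush, then drop the 4-octet LEN/NLEN marker appended by
    # Z_SYNC_FLUSH, as _RFC1979Deflater.filter does.
    compressed = compressor.compress(message)
    compressed += compressor.flush(zlib.Z_SYNC_FLUSH)
    on_the_wire = compressed[:-4]

    # Re-append the stripped marker before inflating, as
    # _RFC1979Inflater.filter does, so zlib sees a complete flush point.
    restored = decompressor.decompress(on_the_wire + '\x00\x00\xff\xff')
    assert restored == message
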
diff --git a/module/lib/thrift/TSCons.py b/module/lib/thrift/TSCons.py
deleted file mode 100644
index 24046256c..000000000
--- a/module/lib/thrift/TSCons.py
+++ /dev/null
@@ -1,33 +0,0 @@
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied. See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-
-from os import path
-from SCons.Builder import Builder
-
-def scons_env(env, add=''):
- opath = path.dirname(path.abspath('$TARGET'))
- lstr = 'thrift --gen cpp -o ' + opath + ' ' + add + ' $SOURCE'
- cppbuild = Builder(action = lstr)
- env.Append(BUILDERS = {'ThriftCpp' : cppbuild})
-
-def gen_cpp(env, dir, file):
- scons_env(env)
- suffixes = ['_types.h', '_types.cpp']
- targets = map(lambda s: 'gen-cpp/' + file + s, suffixes)
- return env.ThriftCpp(targets, dir+file+'.thrift')
diff --git a/module/lib/thrift/TSerialization.py b/module/lib/thrift/TSerialization.py
deleted file mode 100644
index b19f98aa8..000000000
--- a/module/lib/thrift/TSerialization.py
+++ /dev/null
@@ -1,34 +0,0 @@
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied. See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-
-from protocol import TBinaryProtocol
-from transport import TTransport
-
-def serialize(thrift_object, protocol_factory = TBinaryProtocol.TBinaryProtocolFactory()):
- transport = TTransport.TMemoryBuffer()
- protocol = protocol_factory.getProtocol(transport)
- thrift_object.write(protocol)
- return transport.getvalue()
-
-def deserialize(base, buf, protocol_factory = TBinaryProtocol.TBinaryProtocolFactory()):
- transport = TTransport.TMemoryBuffer(buf)
- protocol = protocol_factory.getProtocol(transport)
- base.read(protocol)
- return base
-
diff --git a/module/lib/thrift/Thrift.py b/module/lib/thrift/Thrift.py
deleted file mode 100644
index 1d271fcff..000000000
--- a/module/lib/thrift/Thrift.py
+++ /dev/null
@@ -1,154 +0,0 @@
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied. See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-
-import sys
-
-class TType:
- STOP = 0
- VOID = 1
- BOOL = 2
- BYTE = 3
- I08 = 3
- DOUBLE = 4
- I16 = 6
- I32 = 8
- I64 = 10
- STRING = 11
- UTF7 = 11
- STRUCT = 12
- MAP = 13
- SET = 14
- LIST = 15
- UTF8 = 16
- UTF16 = 17
-
- _VALUES_TO_NAMES = ( 'STOP',
- 'VOID',
- 'BOOL',
- 'BYTE',
- 'DOUBLE',
- None,
- 'I16',
- None,
- 'I32',
- None,
- 'I64',
- 'STRING',
- 'STRUCT',
- 'MAP',
- 'SET',
- 'LIST',
- 'UTF8',
- 'UTF16' )
-
-class TMessageType:
- CALL = 1
- REPLY = 2
- EXCEPTION = 3
- ONEWAY = 4
-
-class TProcessor:
-
- """Base class for procsessor, which works on two streams."""
-
- def process(iprot, oprot):
- pass
-
-class TException(Exception):
-
- """Base class for all thrift exceptions."""
-
- # BaseException.message is deprecated in Python v[2.6,3.0)
- if (2,6,0) <= sys.version_info < (3,0):
- def _get_message(self):
- return self._message
- def _set_message(self, message):
- self._message = message
- message = property(_get_message, _set_message)
-
- def __init__(self, message=None):
- Exception.__init__(self, message)
- self.message = message
-
-class TApplicationException(TException):
-
- """Application level thrift exceptions."""
-
- UNKNOWN = 0
- UNKNOWN_METHOD = 1
- INVALID_MESSAGE_TYPE = 2
- WRONG_METHOD_NAME = 3
- BAD_SEQUENCE_ID = 4
- MISSING_RESULT = 5
- INTERNAL_ERROR = 6
- PROTOCOL_ERROR = 7
-
- def __init__(self, type=UNKNOWN, message=None):
- TException.__init__(self, message)
- self.type = type
-
- def __str__(self):
- if self.message:
- return self.message
- elif self.type == self.UNKNOWN_METHOD:
- return 'Unknown method'
- elif self.type == self.INVALID_MESSAGE_TYPE:
- return 'Invalid message type'
- elif self.type == self.WRONG_METHOD_NAME:
- return 'Wrong method name'
- elif self.type == self.BAD_SEQUENCE_ID:
- return 'Bad sequence ID'
- elif self.type == self.MISSING_RESULT:
- return 'Missing result'
- else:
- return 'Default (unknown) TApplicationException'
-
- def read(self, iprot):
- iprot.readStructBegin()
- while True:
- (fname, ftype, fid) = iprot.readFieldBegin()
- if ftype == TType.STOP:
- break
- if fid == 1:
- if ftype == TType.STRING:
- self.message = iprot.readString();
- else:
- iprot.skip(ftype)
- elif fid == 2:
- if ftype == TType.I32:
- self.type = iprot.readI32();
- else:
- iprot.skip(ftype)
- else:
- iprot.skip(ftype)
- iprot.readFieldEnd()
- iprot.readStructEnd()
-
- def write(self, oprot):
- oprot.writeStructBegin('TApplicationException')
- if self.message != None:
- oprot.writeFieldBegin('message', TType.STRING, 1)
- oprot.writeString(self.message)
- oprot.writeFieldEnd()
- if self.type != None:
- oprot.writeFieldBegin('type', TType.I32, 2)
- oprot.writeI32(self.type)
- oprot.writeFieldEnd()
- oprot.writeFieldStop()
- oprot.writeStructEnd()
diff --git a/module/lib/thrift/__init__.py b/module/lib/thrift/__init__.py
deleted file mode 100644
index 48d659c40..000000000
--- a/module/lib/thrift/__init__.py
+++ /dev/null
@@ -1,20 +0,0 @@
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied. See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-
-__all__ = ['Thrift', 'TSCons']
diff --git a/module/lib/thrift/protocol/TBase.py b/module/lib/thrift/protocol/TBase.py
deleted file mode 100644
index e675c7dc0..000000000
--- a/module/lib/thrift/protocol/TBase.py
+++ /dev/null
@@ -1,72 +0,0 @@
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied. See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-
-from thrift.Thrift import *
-from thrift.protocol import TBinaryProtocol
-from thrift.transport import TTransport
-
-try:
- from thrift.protocol import fastbinary
-except:
- fastbinary = None
-
-class TBase(object):
- __slots__ = []
-
- def __repr__(self):
- L = ['%s=%r' % (key, getattr(self, key))
- for key in self.__slots__ ]
- return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
-
- def __eq__(self, other):
- if not isinstance(other, self.__class__):
- return False
- for attr in self.__slots__:
- my_val = getattr(self, attr)
- other_val = getattr(other, attr)
- if my_val != other_val:
- return False
- return True
-
- def __ne__(self, other):
- return not (self == other)
-
- def read(self, iprot):
- if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
- fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
- return
- iprot.readStruct(self, self.thrift_spec)
-
- def write(self, oprot):
- if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
- oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
- return
- oprot.writeStruct(self, self.thrift_spec)
-
-class TExceptionBase(Exception):
- # old style class so python2.4 can raise exceptions derived from this
- # This can't inherit from TBase because of that limitation.
- __slots__ = []
-
- __repr__ = TBase.__repr__.im_func
- __eq__ = TBase.__eq__.im_func
- __ne__ = TBase.__ne__.im_func
- read = TBase.read.im_func
- write = TBase.write.im_func
-
diff --git a/module/lib/thrift/protocol/TBinaryProtocol.py b/module/lib/thrift/protocol/TBinaryProtocol.py
deleted file mode 100644
index 50c6aa896..000000000
--- a/module/lib/thrift/protocol/TBinaryProtocol.py
+++ /dev/null
@@ -1,259 +0,0 @@
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied. See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-
-from TProtocol import *
-from struct import pack, unpack
-
-class TBinaryProtocol(TProtocolBase):
-
- """Binary implementation of the Thrift protocol driver."""
-
- # NastyHaxx. Python 2.4+ on 32-bit machines forces hex constants to be
- # positive, converting this into a long. If we hardcode the int value
- # instead it'll stay in 32 bit-land.
-
- # VERSION_MASK = 0xffff0000
- VERSION_MASK = -65536
-
- # VERSION_1 = 0x80010000
- VERSION_1 = -2147418112
-
- TYPE_MASK = 0x000000ff
-
- def __init__(self, trans, strictRead=False, strictWrite=True):
- TProtocolBase.__init__(self, trans)
- self.strictRead = strictRead
- self.strictWrite = strictWrite
-
- def writeMessageBegin(self, name, type, seqid):
- if self.strictWrite:
- self.writeI32(TBinaryProtocol.VERSION_1 | type)
- self.writeString(name)
- self.writeI32(seqid)
- else:
- self.writeString(name)
- self.writeByte(type)
- self.writeI32(seqid)
-
- def writeMessageEnd(self):
- pass
-
- def writeStructBegin(self, name):
- pass
-
- def writeStructEnd(self):
- pass
-
- def writeFieldBegin(self, name, type, id):
- self.writeByte(type)
- self.writeI16(id)
-
- def writeFieldEnd(self):
- pass
-
- def writeFieldStop(self):
- self.writeByte(TType.STOP);
-
- def writeMapBegin(self, ktype, vtype, size):
- self.writeByte(ktype)
- self.writeByte(vtype)
- self.writeI32(size)
-
- def writeMapEnd(self):
- pass
-
- def writeListBegin(self, etype, size):
- self.writeByte(etype)
- self.writeI32(size)
-
- def writeListEnd(self):
- pass
-
- def writeSetBegin(self, etype, size):
- self.writeByte(etype)
- self.writeI32(size)
-
- def writeSetEnd(self):
- pass
-
- def writeBool(self, bool):
- if bool:
- self.writeByte(1)
- else:
- self.writeByte(0)
-
- def writeByte(self, byte):
- buff = pack("!b", byte)
- self.trans.write(buff)
-
- def writeI16(self, i16):
- buff = pack("!h", i16)
- self.trans.write(buff)
-
- def writeI32(self, i32):
- buff = pack("!i", i32)
- self.trans.write(buff)
-
- def writeI64(self, i64):
- buff = pack("!q", i64)
- self.trans.write(buff)
-
- def writeDouble(self, dub):
- buff = pack("!d", dub)
- self.trans.write(buff)
-
- def writeString(self, str):
- self.writeI32(len(str))
- self.trans.write(str)
-
- def readMessageBegin(self):
- sz = self.readI32()
- if sz < 0:
- version = sz & TBinaryProtocol.VERSION_MASK
- if version != TBinaryProtocol.VERSION_1:
- raise TProtocolException(type=TProtocolException.BAD_VERSION, message='Bad version in readMessageBegin: %d' % (sz))
- type = sz & TBinaryProtocol.TYPE_MASK
- name = self.readString()
- seqid = self.readI32()
- else:
- if self.strictRead:
- raise TProtocolException(type=TProtocolException.BAD_VERSION, message='No protocol version header')
- name = self.trans.readAll(sz)
- type = self.readByte()
- seqid = self.readI32()
- return (name, type, seqid)
-
- def readMessageEnd(self):
- pass
-
- def readStructBegin(self):
- pass
-
- def readStructEnd(self):
- pass
-
- def readFieldBegin(self):
- type = self.readByte()
- if type == TType.STOP:
- return (None, type, 0)
- id = self.readI16()
- return (None, type, id)
-
- def readFieldEnd(self):
- pass
-
- def readMapBegin(self):
- ktype = self.readByte()
- vtype = self.readByte()
- size = self.readI32()
- return (ktype, vtype, size)
-
- def readMapEnd(self):
- pass
-
- def readListBegin(self):
- etype = self.readByte()
- size = self.readI32()
- return (etype, size)
-
- def readListEnd(self):
- pass
-
- def readSetBegin(self):
- etype = self.readByte()
- size = self.readI32()
- return (etype, size)
-
- def readSetEnd(self):
- pass
-
- def readBool(self):
- byte = self.readByte()
- if byte == 0:
- return False
- return True
-
- def readByte(self):
- buff = self.trans.readAll(1)
- val, = unpack('!b', buff)
- return val
-
- def readI16(self):
- buff = self.trans.readAll(2)
- val, = unpack('!h', buff)
- return val
-
- def readI32(self):
- buff = self.trans.readAll(4)
- val, = unpack('!i', buff)
- return val
-
- def readI64(self):
- buff = self.trans.readAll(8)
- val, = unpack('!q', buff)
- return val
-
- def readDouble(self):
- buff = self.trans.readAll(8)
- val, = unpack('!d', buff)
- return val
-
- def readString(self):
- len = self.readI32()
- str = self.trans.readAll(len)
- return str
-
-
-class TBinaryProtocolFactory:
- def __init__(self, strictRead=False, strictWrite=True):
- self.strictRead = strictRead
- self.strictWrite = strictWrite
-
- def getProtocol(self, trans):
- prot = TBinaryProtocol(trans, self.strictRead, self.strictWrite)
- return prot
-
-
-class TBinaryProtocolAccelerated(TBinaryProtocol):
-
- """C-Accelerated version of TBinaryProtocol.
-
- This class does not override any of TBinaryProtocol's methods,
- but the generated code recognizes it directly and will call into
- our C module to do the encoding, bypassing this object entirely.
- We inherit from TBinaryProtocol so that the normal TBinaryProtocol
- encoding can happen if the fastbinary module doesn't work for some
- reason. (TODO(dreiss): Make this happen sanely in more cases.)
-
- In order to take advantage of the C module, just use
- TBinaryProtocolAccelerated instead of TBinaryProtocol.
-
- NOTE: This code was contributed by an external developer.
- The internal Thrift team has reviewed and tested it,
- but we cannot guarantee that it is production-ready.
- Please feel free to report bugs and/or success stories
- to the public mailing list.
- """
-
- pass
-
-
-class TBinaryProtocolAcceleratedFactory:
- def getProtocol(self, trans):
- return TBinaryProtocolAccelerated(trans)
diff --git a/module/lib/thrift/protocol/TCompactProtocol.py b/module/lib/thrift/protocol/TCompactProtocol.py
deleted file mode 100644
index 016a33171..000000000
--- a/module/lib/thrift/protocol/TCompactProtocol.py
+++ /dev/null
@@ -1,395 +0,0 @@
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied. See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-
-from TProtocol import *
-from struct import pack, unpack
-
-__all__ = ['TCompactProtocol', 'TCompactProtocolFactory']
-
-CLEAR = 0
-FIELD_WRITE = 1
-VALUE_WRITE = 2
-CONTAINER_WRITE = 3
-BOOL_WRITE = 4
-FIELD_READ = 5
-CONTAINER_READ = 6
-VALUE_READ = 7
-BOOL_READ = 8
-
-def make_helper(v_from, container):
- def helper(func):
- def nested(self, *args, **kwargs):
- assert self.state in (v_from, container), (self.state, v_from, container)
- return func(self, *args, **kwargs)
- return nested
- return helper
-writer = make_helper(VALUE_WRITE, CONTAINER_WRITE)
-reader = make_helper(VALUE_READ, CONTAINER_READ)
-
-def makeZigZag(n, bits):
- return (n << 1) ^ (n >> (bits - 1))
-
-def fromZigZag(n):
- return (n >> 1) ^ -(n & 1)
-
-def writeVarint(trans, n):
- out = []
- while True:
- if n & ~0x7f == 0:
- out.append(n)
- break
- else:
- out.append((n & 0xff) | 0x80)
- n = n >> 7
- trans.write(''.join(map(chr, out)))
-
-def readVarint(trans):
- result = 0
- shift = 0
- while True:
- x = trans.readAll(1)
- byte = ord(x)
- result |= (byte & 0x7f) << shift
- if byte >> 7 == 0:
- return result
- shift += 7
-
-class CompactType:
- STOP = 0x00
- TRUE = 0x01
- FALSE = 0x02
- BYTE = 0x03
- I16 = 0x04
- I32 = 0x05
- I64 = 0x06
- DOUBLE = 0x07
- BINARY = 0x08
- LIST = 0x09
- SET = 0x0A
- MAP = 0x0B
- STRUCT = 0x0C
-
-CTYPES = {TType.STOP: CompactType.STOP,
- TType.BOOL: CompactType.TRUE, # used for collection
- TType.BYTE: CompactType.BYTE,
- TType.I16: CompactType.I16,
- TType.I32: CompactType.I32,
- TType.I64: CompactType.I64,
- TType.DOUBLE: CompactType.DOUBLE,
- TType.STRING: CompactType.BINARY,
- TType.STRUCT: CompactType.STRUCT,
- TType.LIST: CompactType.LIST,
- TType.SET: CompactType.SET,
- TType.MAP: CompactType.MAP
- }
-
-TTYPES = {}
-for k, v in CTYPES.items():
- TTYPES[v] = k
-TTYPES[CompactType.FALSE] = TType.BOOL
-del k
-del v
-
-class TCompactProtocol(TProtocolBase):
- "Compact implementation of the Thrift protocol driver."
-
- PROTOCOL_ID = 0x82
- VERSION = 1
- VERSION_MASK = 0x1f
- TYPE_MASK = 0xe0
- TYPE_SHIFT_AMOUNT = 5
-
- def __init__(self, trans):
- TProtocolBase.__init__(self, trans)
- self.state = CLEAR
- self.__last_fid = 0
- self.__bool_fid = None
- self.__bool_value = None
- self.__structs = []
- self.__containers = []
-
- def __writeVarint(self, n):
- writeVarint(self.trans, n)
-
- def writeMessageBegin(self, name, type, seqid):
- assert self.state == CLEAR
- self.__writeUByte(self.PROTOCOL_ID)
- self.__writeUByte(self.VERSION | (type << self.TYPE_SHIFT_AMOUNT))
- self.__writeVarint(seqid)
- self.__writeString(name)
- self.state = VALUE_WRITE
-
- def writeMessageEnd(self):
- assert self.state == VALUE_WRITE
- self.state = CLEAR
-
- def writeStructBegin(self, name):
- assert self.state in (CLEAR, CONTAINER_WRITE, VALUE_WRITE), self.state
- self.__structs.append((self.state, self.__last_fid))
- self.state = FIELD_WRITE
- self.__last_fid = 0
-
- def writeStructEnd(self):
- assert self.state == FIELD_WRITE
- self.state, self.__last_fid = self.__structs.pop()
-
- def writeFieldStop(self):
- self.__writeByte(0)
-
- def __writeFieldHeader(self, type, fid):
- delta = fid - self.__last_fid
- if 0 < delta <= 15:
- self.__writeUByte(delta << 4 | type)
- else:
- self.__writeByte(type)
- self.__writeI16(fid)
- self.__last_fid = fid
-
- def writeFieldBegin(self, name, type, fid):
- assert self.state == FIELD_WRITE, self.state
- if type == TType.BOOL:
- self.state = BOOL_WRITE
- self.__bool_fid = fid
- else:
- self.state = VALUE_WRITE
- self.__writeFieldHeader(CTYPES[type], fid)
-
- def writeFieldEnd(self):
- assert self.state in (VALUE_WRITE, BOOL_WRITE), self.state
- self.state = FIELD_WRITE
-
- def __writeUByte(self, byte):
- self.trans.write(pack('!B', byte))
-
- def __writeByte(self, byte):
- self.trans.write(pack('!b', byte))
-
- def __writeI16(self, i16):
- self.__writeVarint(makeZigZag(i16, 16))
-
- def __writeSize(self, i32):
- self.__writeVarint(i32)
-
- def writeCollectionBegin(self, etype, size):
- assert self.state in (VALUE_WRITE, CONTAINER_WRITE), self.state
- if size <= 14:
- self.__writeUByte(size << 4 | CTYPES[etype])
- else:
- self.__writeUByte(0xf0 | CTYPES[etype])
- self.__writeSize(size)
- self.__containers.append(self.state)
- self.state = CONTAINER_WRITE
- writeSetBegin = writeCollectionBegin
- writeListBegin = writeCollectionBegin
-
- def writeMapBegin(self, ktype, vtype, size):
- assert self.state in (VALUE_WRITE, CONTAINER_WRITE), self.state
- if size == 0:
- self.__writeByte(0)
- else:
- self.__writeSize(size)
- self.__writeUByte(CTYPES[ktype] << 4 | CTYPES[vtype])
- self.__containers.append(self.state)
- self.state = CONTAINER_WRITE
-
- def writeCollectionEnd(self):
- assert self.state == CONTAINER_WRITE, self.state
- self.state = self.__containers.pop()
- writeMapEnd = writeCollectionEnd
- writeSetEnd = writeCollectionEnd
- writeListEnd = writeCollectionEnd
-
- def writeBool(self, bool):
- if self.state == BOOL_WRITE:
- if bool:
- ctype = CompactType.TRUE
- else:
- ctype = CompactType.FALSE
- self.__writeFieldHeader(ctype, self.__bool_fid)
- elif self.state == CONTAINER_WRITE:
- if bool:
- self.__writeByte(CompactType.TRUE)
- else:
- self.__writeByte(CompactType.FALSE)
- else:
- raise AssertionError, "Invalid state in compact protocol"
-
- writeByte = writer(__writeByte)
- writeI16 = writer(__writeI16)
-
- @writer
- def writeI32(self, i32):
- self.__writeVarint(makeZigZag(i32, 32))
-
- @writer
- def writeI64(self, i64):
- self.__writeVarint(makeZigZag(i64, 64))
-
- @writer
- def writeDouble(self, dub):
- self.trans.write(pack('!d', dub))
-
- def __writeString(self, s):
- self.__writeSize(len(s))
- self.trans.write(s)
- writeString = writer(__writeString)
-
- def readFieldBegin(self):
- assert self.state == FIELD_READ, self.state
- type = self.__readUByte()
- if type & 0x0f == TType.STOP:
- return (None, 0, 0)
- delta = type >> 4
- if delta == 0:
- fid = self.__readI16()
- else:
- fid = self.__last_fid + delta
- self.__last_fid = fid
- type = type & 0x0f
- if type == CompactType.TRUE:
- self.state = BOOL_READ
- self.__bool_value = True
- elif type == CompactType.FALSE:
- self.state = BOOL_READ
- self.__bool_value = False
- else:
- self.state = VALUE_READ
- return (None, self.__getTType(type), fid)
-
- def readFieldEnd(self):
- assert self.state in (VALUE_READ, BOOL_READ), self.state
- self.state = FIELD_READ
-
- def __readUByte(self):
- result, = unpack('!B', self.trans.readAll(1))
- return result
-
- def __readByte(self):
- result, = unpack('!b', self.trans.readAll(1))
- return result
-
- def __readVarint(self):
- return readVarint(self.trans)
-
- def __readZigZag(self):
- return fromZigZag(self.__readVarint())
-
- def __readSize(self):
- result = self.__readVarint()
- if result < 0:
- raise TException("Length < 0")
- return result
-
- def readMessageBegin(self):
- assert self.state == CLEAR
- proto_id = self.__readUByte()
- if proto_id != self.PROTOCOL_ID:
- raise TProtocolException(TProtocolException.BAD_VERSION,
- 'Bad protocol id in the message: %d' % proto_id)
- ver_type = self.__readUByte()
- type = (ver_type & self.TYPE_MASK) >> self.TYPE_SHIFT_AMOUNT
- version = ver_type & self.VERSION_MASK
- if version != self.VERSION:
- raise TProtocolException(TProtocolException.BAD_VERSION,
- 'Bad version: %d (expect %d)' % (version, self.VERSION))
- seqid = self.__readVarint()
- name = self.__readString()
- return (name, type, seqid)
-
- def readMessageEnd(self):
- assert self.state == CLEAR
- assert len(self.__structs) == 0
-
- def readStructBegin(self):
- assert self.state in (CLEAR, CONTAINER_READ, VALUE_READ), self.state
- self.__structs.append((self.state, self.__last_fid))
- self.state = FIELD_READ
- self.__last_fid = 0
-
- def readStructEnd(self):
- assert self.state == FIELD_READ
- self.state, self.__last_fid = self.__structs.pop()
-
- def readCollectionBegin(self):
- assert self.state in (VALUE_READ, CONTAINER_READ), self.state
- size_type = self.__readUByte()
- size = size_type >> 4
- type = self.__getTType(size_type)
- if size == 15:
- size = self.__readSize()
- self.__containers.append(self.state)
- self.state = CONTAINER_READ
- return type, size
- readSetBegin = readCollectionBegin
- readListBegin = readCollectionBegin
-
- def readMapBegin(self):
- assert self.state in (VALUE_READ, CONTAINER_READ), self.state
- size = self.__readSize()
- types = 0
- if size > 0:
- types = self.__readUByte()
- vtype = self.__getTType(types)
- ktype = self.__getTType(types >> 4)
- self.__containers.append(self.state)
- self.state = CONTAINER_READ
- return (ktype, vtype, size)
-
- def readCollectionEnd(self):
- assert self.state == CONTAINER_READ, self.state
- self.state = self.__containers.pop()
- readSetEnd = readCollectionEnd
- readListEnd = readCollectionEnd
- readMapEnd = readCollectionEnd
-
- def readBool(self):
- if self.state == BOOL_READ:
- return self.__bool_value == CompactType.TRUE
- elif self.state == CONTAINER_READ:
- return self.__readByte() == CompactType.TRUE
- else:
- raise AssertionError, "Invalid state in compact protocol: %d" % self.state
-
- readByte = reader(__readByte)
- __readI16 = __readZigZag
- readI16 = reader(__readZigZag)
- readI32 = reader(__readZigZag)
- readI64 = reader(__readZigZag)
-
- @reader
- def readDouble(self):
- buff = self.trans.readAll(8)
- val, = unpack('!d', buff)
- return val
-
- def __readString(self):
- len = self.__readSize()
- return self.trans.readAll(len)
- readString = reader(__readString)
-
- def __getTType(self, byte):
- return TTYPES[byte & 0x0f]
-
-
-class TCompactProtocolFactory:
- def __init__(self):
- pass
-
- def getProtocol(self, trans):
- return TCompactProtocol(trans)
diff --git a/module/lib/thrift/protocol/TProtocol.py b/module/lib/thrift/protocol/TProtocol.py
deleted file mode 100644
index 7338ff68a..000000000
--- a/module/lib/thrift/protocol/TProtocol.py
+++ /dev/null
@@ -1,404 +0,0 @@
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied. See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-
-from thrift.Thrift import *
-
-class TProtocolException(TException):
-
- """Custom Protocol Exception class"""
-
- UNKNOWN = 0
- INVALID_DATA = 1
- NEGATIVE_SIZE = 2
- SIZE_LIMIT = 3
- BAD_VERSION = 4
-
- def __init__(self, type=UNKNOWN, message=None):
- TException.__init__(self, message)
- self.type = type
-
-class TProtocolBase:
-
- """Base class for Thrift protocol driver."""
-
- def __init__(self, trans):
- self.trans = trans
-
- def writeMessageBegin(self, name, type, seqid):
- pass
-
- def writeMessageEnd(self):
- pass
-
- def writeStructBegin(self, name):
- pass
-
- def writeStructEnd(self):
- pass
-
- def writeFieldBegin(self, name, type, id):
- pass
-
- def writeFieldEnd(self):
- pass
-
- def writeFieldStop(self):
- pass
-
- def writeMapBegin(self, ktype, vtype, size):
- pass
-
- def writeMapEnd(self):
- pass
-
- def writeListBegin(self, etype, size):
- pass
-
- def writeListEnd(self):
- pass
-
- def writeSetBegin(self, etype, size):
- pass
-
- def writeSetEnd(self):
- pass
-
- def writeBool(self, bool):
- pass
-
- def writeByte(self, byte):
- pass
-
- def writeI16(self, i16):
- pass
-
- def writeI32(self, i32):
- pass
-
- def writeI64(self, i64):
- pass
-
- def writeDouble(self, dub):
- pass
-
- def writeString(self, str):
- pass
-
- def readMessageBegin(self):
- pass
-
- def readMessageEnd(self):
- pass
-
- def readStructBegin(self):
- pass
-
- def readStructEnd(self):
- pass
-
- def readFieldBegin(self):
- pass
-
- def readFieldEnd(self):
- pass
-
- def readMapBegin(self):
- pass
-
- def readMapEnd(self):
- pass
-
- def readListBegin(self):
- pass
-
- def readListEnd(self):
- pass
-
- def readSetBegin(self):
- pass
-
- def readSetEnd(self):
- pass
-
- def readBool(self):
- pass
-
- def readByte(self):
- pass
-
- def readI16(self):
- pass
-
- def readI32(self):
- pass
-
- def readI64(self):
- pass
-
- def readDouble(self):
- pass
-
- def readString(self):
- pass
-
- def skip(self, type):
- if type == TType.STOP:
- return
- elif type == TType.BOOL:
- self.readBool()
- elif type == TType.BYTE:
- self.readByte()
- elif type == TType.I16:
- self.readI16()
- elif type == TType.I32:
- self.readI32()
- elif type == TType.I64:
- self.readI64()
- elif type == TType.DOUBLE:
- self.readDouble()
- elif type == TType.STRING:
- self.readString()
- elif type == TType.STRUCT:
- name = self.readStructBegin()
- while True:
- (name, type, id) = self.readFieldBegin()
- if type == TType.STOP:
- break
- self.skip(type)
- self.readFieldEnd()
- self.readStructEnd()
- elif type == TType.MAP:
- (ktype, vtype, size) = self.readMapBegin()
- for i in range(size):
- self.skip(ktype)
- self.skip(vtype)
- self.readMapEnd()
- elif type == TType.SET:
- (etype, size) = self.readSetBegin()
- for i in range(size):
- self.skip(etype)
- self.readSetEnd()
- elif type == TType.LIST:
- (etype, size) = self.readListBegin()
- for i in range(size):
- self.skip(etype)
- self.readListEnd()
-
- # tuple of: ( 'reader method' name, is_container boolean, 'writer_method' name )
- _TTYPE_HANDLERS = (
- (None, None, False), # 0 == TType,STOP
- (None, None, False), # 1 == TType.VOID # TODO: handle void?
- ('readBool', 'writeBool', False), # 2 == TType.BOOL
- ('readByte', 'writeByte', False), # 3 == TType.BYTE and I08
- ('readDouble', 'writeDouble', False), # 4 == TType.DOUBLE
- (None, None, False), # 5, undefined
- ('readI16', 'writeI16', False), # 6 == TType.I16
- (None, None, False), # 7, undefined
- ('readI32', 'writeI32', False), # 8 == TType.I32
- (None, None, False), # 9, undefined
- ('readI64', 'writeI64', False), # 10 == TType.I64
- ('readString', 'writeString', False), # 11 == TType.STRING and UTF7
- ('readContainerStruct', 'writeContainerStruct', True), # 12 == TType.STRUCT
- ('readContainerMap', 'writeContainerMap', True), # 13 == TType.MAP
- ('readContainerSet', 'writeContainerSet', True), # 14 == TType.SET
- ('readContainerList', 'writeContainerList', True), # 15 == TType.LIST
- (None, None, False), # 16 == TType.UTF8 # TODO: handle utf8 types?
- (None, None, False)# 17 == TType.UTF16 # TODO: handle utf16 types?
- )
-
- def readFieldByTType(self, ttype, spec):
- try:
- (r_handler, w_handler, is_container) = self._TTYPE_HANDLERS[ttype]
- except IndexError:
- raise TProtocolException(type=TProtocolException.INVALID_DATA,
- message='Invalid field type %d' % (ttype))
- if r_handler is None:
- raise TProtocolException(type=TProtocolException.INVALID_DATA,
- message='Invalid field type %d' % (ttype))
- reader = getattr(self, r_handler)
- if not is_container:
- return reader()
- return reader(spec)
-
- def readContainerList(self, spec):
- results = []
- ttype, tspec = spec[0], spec[1]
- r_handler = self._TTYPE_HANDLERS[ttype][0]
- reader = getattr(self, r_handler)
- (list_type, list_len) = self.readListBegin()
- if tspec is None:
- # list values are simple types
- for idx in xrange(list_len):
- results.append(reader())
- else:
- # this is like an inlined readFieldByTType
- container_reader = self._TTYPE_HANDLERS[list_type][0]
- val_reader = getattr(self, container_reader)
- for idx in xrange(list_len):
- val = val_reader(tspec)
- results.append(val)
- self.readListEnd()
- return results
-
- def readContainerSet(self, spec):
- results = set()
- ttype, tspec = spec[0], spec[1]
- r_handler = self._TTYPE_HANDLERS[ttype][0]
- reader = getattr(self, r_handler)
- (set_type, set_len) = self.readSetBegin()
- if tspec is None:
- # set members are simple types
- for idx in xrange(set_len):
- results.add(reader())
- else:
- container_reader = self._TTYPE_HANDLERS[set_type][0]
- val_reader = getattr(self, container_reader)
- for idx in xrange(set_len):
- results.add(val_reader(tspec))
- self.readSetEnd()
- return results
-
- def readContainerStruct(self, spec):
- (obj_class, obj_spec) = spec
- obj = obj_class()
- obj.read(self)
- return obj
-
- def readContainerMap(self, spec):
- results = dict()
- key_ttype, key_spec = spec[0], spec[1]
- val_ttype, val_spec = spec[2], spec[3]
- (map_ktype, map_vtype, map_len) = self.readMapBegin()
- # TODO: compare types we just decoded with thrift_spec and abort/skip if types disagree
- key_reader = getattr(self, self._TTYPE_HANDLERS[key_ttype][0])
- val_reader = getattr(self, self._TTYPE_HANDLERS[val_ttype][0])
- # list values are simple types
- for idx in xrange(map_len):
- if key_spec is None:
- k_val = key_reader()
- else:
- k_val = self.readFieldByTType(key_ttype, key_spec)
- if val_spec is None:
- v_val = val_reader()
- else:
- v_val = self.readFieldByTType(val_ttype, val_spec)
- # this raises a TypeError for unhashable key types, e.g. d = dict(); d[[0, 1]] = 2 fails
- results[k_val] = v_val
- self.readMapEnd()
- return results
-
- def readStruct(self, obj, thrift_spec):
- self.readStructBegin()
- while True:
- (fname, ftype, fid) = self.readFieldBegin()
- if ftype == TType.STOP:
- break
- try:
- field = thrift_spec[fid]
- except IndexError:
- self.skip(ftype)
- else:
- if field is not None and ftype == field[1]:
- fname = field[2]
- fspec = field[3]
- val = self.readFieldByTType(ftype, fspec)
- setattr(obj, fname, val)
- else:
- self.skip(ftype)
- self.readFieldEnd()
- self.readStructEnd()
-
- def writeContainerStruct(self, val, spec):
- val.write(self)
-
- def writeContainerList(self, val, spec):
- self.writeListBegin(spec[0], len(val))
- r_handler, w_handler, is_container = self._TTYPE_HANDLERS[spec[0]]
- e_writer = getattr(self, w_handler)
- if not is_container:
- for elem in val:
- e_writer(elem)
- else:
- for elem in val:
- e_writer(elem, spec[1])
- self.writeListEnd()
-
- def writeContainerSet(self, val, spec):
- self.writeSetBegin(spec[0], len(val))
- r_handler, w_handler, is_container = self._TTYPE_HANDLERS[spec[0]]
- e_writer = getattr(self, w_handler)
- if not is_container:
- for elem in val:
- e_writer(elem)
- else:
- for elem in val:
- e_writer(elem, spec[1])
- self.writeSetEnd()
-
- def writeContainerMap(self, val, spec):
- k_type = spec[0]
- v_type = spec[2]
- ignore, ktype_name, k_is_container = self._TTYPE_HANDLERS[k_type]
- ignore, vtype_name, v_is_container = self._TTYPE_HANDLERS[v_type]
- k_writer = getattr(self, ktype_name)
- v_writer = getattr(self, vtype_name)
- self.writeMapBegin(k_type, v_type, len(val))
- for m_key, m_val in val.iteritems():
- if not k_is_container:
- k_writer(m_key)
- else:
- k_writer(m_key, spec[1])
- if not v_is_container:
- v_writer(m_val)
- else:
- v_writer(m_val, spec[3])
- self.writeMapEnd()
-
- def writeStruct(self, obj, thrift_spec):
- self.writeStructBegin(obj.__class__.__name__)
- for field in thrift_spec:
- if field is None:
- continue
- fname = field[2]
- val = getattr(obj, fname)
- if val is None:
- # skip writing out unset fields
- continue
- fid = field[0]
- ftype = field[1]
- fspec = field[3]
- # get the writer method for this value
- self.writeFieldBegin(fname, ftype, fid)
- self.writeFieldByTType(ftype, val, fspec)
- self.writeFieldEnd()
- self.writeFieldStop()
- self.writeStructEnd()
-
- def writeFieldByTType(self, ttype, val, spec):
- r_handler, w_handler, is_container = self._TTYPE_HANDLERS[ttype]
- writer = getattr(self, w_handler)
- if is_container:
- writer(val, spec)
- else:
- writer(val)
-
-class TProtocolFactory:
- def getProtocol(self, trans):
- pass
-
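
The readStruct/writeStruct helpers deleted above drive generic (de)serialization from a thrift_spec tuple of the form (fid, ftype, fname, fspec, default). A minimal sketch of how a concrete protocol would use them; the Point struct and its field names are hypothetical, for illustration only:

    from thrift.Thrift import TType
    from thrift.protocol import TBinaryProtocol
    from thrift.transport import TTransport

    class Point(object):
        # hypothetical struct: two double fields with ids 1 and 2
        thrift_spec = (
            None,                                # field id 0 is unused
            (1, TType.DOUBLE, 'x', None, None),  # (fid, ftype, fname, fspec, default)
            (2, TType.DOUBLE, 'y', None, None),
        )

        def __init__(self, x=None, y=None):
            self.x, self.y = x, y

    # serialize with the generic writeStruct helper
    wbuf = TTransport.TMemoryBuffer()
    oprot = TBinaryProtocol.TBinaryProtocol(wbuf)
    oprot.writeStruct(Point(1.0, 2.0), Point.thrift_spec)

    # deserialize with readStruct into a fresh object
    iprot = TBinaryProtocol.TBinaryProtocol(TTransport.TMemoryBuffer(wbuf.getvalue()))
    p = Point()
    iprot.readStruct(p, Point.thrift_spec)
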
diff --git a/module/lib/thrift/protocol/__init__.py b/module/lib/thrift/protocol/__init__.py
deleted file mode 100644
index d53359b28..000000000
--- a/module/lib/thrift/protocol/__init__.py
+++ /dev/null
@@ -1,20 +0,0 @@
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied. See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-
-__all__ = ['TProtocol', 'TBinaryProtocol', 'fastbinary', 'TBase']
diff --git a/module/lib/thrift/server/THttpServer.py b/module/lib/thrift/server/THttpServer.py
deleted file mode 100644
index 3047d9c00..000000000
--- a/module/lib/thrift/server/THttpServer.py
+++ /dev/null
@@ -1,82 +0,0 @@
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied. See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-
-import BaseHTTPServer
-
-from thrift.server import TServer
-from thrift.transport import TTransport
-
-class ResponseException(Exception):
- """Allows handlers to override the HTTP response
-
- Normally, THttpServer always sends a 200 response. If a handler wants
- to override this behavior (e.g., to simulate a misconfigured or
- overloaded web server during testing), it can raise a ResponseException.
- The function passed to the constructor will be called with the
- RequestHandler as its only argument.
- """
- def __init__(self, handler):
- self.handler = handler
-
-
-class THttpServer(TServer.TServer):
- """A simple HTTP-based Thrift server
-
- This class is not very performant, but it is useful (for example) for
- acting as a mock version of an Apache-based PHP Thrift endpoint."""
-
- def __init__(self, processor, server_address,
- inputProtocolFactory, outputProtocolFactory = None,
- server_class = BaseHTTPServer.HTTPServer):
- """Set up protocol factories and HTTP server.
-
- See BaseHTTPServer for server_address.
- See TServer for protocol factories."""
-
- if outputProtocolFactory is None:
- outputProtocolFactory = inputProtocolFactory
-
- TServer.TServer.__init__(self, processor, None, None, None,
- inputProtocolFactory, outputProtocolFactory)
-
- thttpserver = self
-
- class RequestHandler(BaseHTTPServer.BaseHTTPRequestHandler):
- def do_POST(self):
- # Don't care about the request path.
- itrans = TTransport.TFileObjectTransport(self.rfile)
- otrans = TTransport.TFileObjectTransport(self.wfile)
- itrans = TTransport.TBufferedTransport(itrans, int(self.headers['Content-Length']))
- otrans = TTransport.TMemoryBuffer()
- iprot = thttpserver.inputProtocolFactory.getProtocol(itrans)
- oprot = thttpserver.outputProtocolFactory.getProtocol(otrans)
- try:
- thttpserver.processor.process(iprot, oprot)
- except ResponseException, exn:
- exn.handler(self)
- else:
- self.send_response(200)
- self.send_header("content-type", "application/x-thrift")
- self.end_headers()
- self.wfile.write(otrans.getvalue())
-
- self.httpd = server_class(server_address, RequestHandler)
-
- def serve(self):
- self.httpd.serve_forever()
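
For reference, the HTTP server deleted here was typically set up with a generated processor and a protocol factory. A minimal sketch; NoopProcessor stands in for a thrift-generated ExampleService.Processor(handler) and the address is arbitrary:

    from thrift.protocol import TBinaryProtocol
    from thrift.server import THttpServer

    class NoopProcessor(object):
        # stand-in for a generated Processor(handler)
        def process(self, iprot, oprot):
            pass  # a real processor reads a call from iprot and writes the reply to oprot

    pfactory = TBinaryProtocol.TBinaryProtocolFactory()
    server = THttpServer.THttpServer(NoopProcessor(), ('localhost', 9090), pfactory)
    server.serve()  # serve_forever() on the underlying BaseHTTPServer
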
diff --git a/module/lib/thrift/server/TNonblockingServer.py b/module/lib/thrift/server/TNonblockingServer.py
deleted file mode 100644
index ea348a0b6..000000000
--- a/module/lib/thrift/server/TNonblockingServer.py
+++ /dev/null
@@ -1,310 +0,0 @@
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied. See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-"""Implementation of non-blocking server.
-
-The main idea of the server is receiving and sending requests
-only from the main thread.
-
-It also pools worker threads per task, not per connection.
-"""
-import threading
-import socket
-import Queue
-import select
-import struct
-import logging
-
-from thrift.transport import TTransport
-from thrift.protocol.TBinaryProtocol import TBinaryProtocolFactory
-
-__all__ = ['TNonblockingServer']
-
-class Worker(threading.Thread):
- """Worker is a small helper to process incoming connection."""
- def __init__(self, queue):
- threading.Thread.__init__(self)
- self.queue = queue
-
- def run(self):
- """Process queries from task queue, stop if processor is None."""
- while True:
- try:
- processor, iprot, oprot, otrans, callback = self.queue.get()
- if processor is None:
- break
- processor.process(iprot, oprot)
- callback(True, otrans.getvalue())
- except Exception:
- logging.exception("Exception while processing request")
- callback(False, '')
-
-WAIT_LEN = 0
-WAIT_MESSAGE = 1
-WAIT_PROCESS = 2
-SEND_ANSWER = 3
-CLOSED = 4
-
-def locked(func):
- "Decorator which locks self.lock."
- def nested(self, *args, **kwargs):
- self.lock.acquire()
- try:
- return func(self, *args, **kwargs)
- finally:
- self.lock.release()
- return nested
-
-def socket_exception(func):
- "Decorator close object on socket.error."
- def read(self, *args, **kwargs):
- try:
- return func(self, *args, **kwargs)
- except socket.error:
- self.close()
- return read
-
-class Connection:
- """Basic class is represented connection.
-
- It can be in state:
- WAIT_LEN --- connection is reading request len.
- WAIT_MESSAGE --- connection is reading request.
- WAIT_PROCESS --- connection has just read whole request and
- waits for call ready routine.
- SEND_ANSWER --- connection is sending answer string (including length
- of answer).
- CLOSED --- socket was closed and connection should be deleted.
- """
- def __init__(self, new_socket, wake_up):
- self.socket = new_socket
- self.socket.setblocking(False)
- self.status = WAIT_LEN
- self.len = 0
- self.message = ''
- self.lock = threading.Lock()
- self.wake_up = wake_up
-
- def _read_len(self):
- """Reads length of request.
-
- It's really paranoic routine and it may be replaced by
- self.socket.recv(4)."""
- read = self.socket.recv(4 - len(self.message))
- if len(read) == 0:
- # if we read 0 bytes and self.message is empty, the client closed
- # the connection
- if len(self.message) != 0:
- logging.error("can't read frame size from socket")
- self.close()
- return
- self.message += read
- if len(self.message) == 4:
- self.len, = struct.unpack('!i', self.message)
- if self.len < 0:
- logging.error("negative frame size, it seems client"\
- " doesn't use FramedTransport")
- self.close()
- elif self.len == 0:
- logging.error("empty frame, it's really strange")
- self.close()
- else:
- self.message = ''
- self.status = WAIT_MESSAGE
-
- @socket_exception
- def read(self):
- """Reads data from stream and switch state."""
- assert self.status in (WAIT_LEN, WAIT_MESSAGE)
- if self.status == WAIT_LEN:
- self._read_len()
- # go back to the main loop here for simplicity instead of
- # falling through, even though there is a good chance that
- # the message is already available
- elif self.status == WAIT_MESSAGE:
- read = self.socket.recv(self.len - len(self.message))
- if len(read) == 0:
- logging.error("can't read frame from socket (get %d of %d bytes)" %
- (len(self.message), self.len))
- self.close()
- return
- self.message += read
- if len(self.message) == self.len:
- self.status = WAIT_PROCESS
-
- @socket_exception
- def write(self):
- """Writes data from socket and switch state."""
- assert self.status == SEND_ANSWER
- sent = self.socket.send(self.message)
- if sent == len(self.message):
- self.status = WAIT_LEN
- self.message = ''
- self.len = 0
- else:
- self.message = self.message[sent:]
-
- @locked
- def ready(self, all_ok, message):
- """Callback function for switching state and waking up main thread.
-
- This function is the only function witch can be called asynchronous.
-
- The ready can switch Connection to three states:
- WAIT_LEN if request was oneway.
- SEND_ANSWER if request was processed in normal way.
- CLOSED if request throws unexpected exception.
-
- The one wakes up main thread.
- """
- assert self.status == WAIT_PROCESS
- if not all_ok:
- self.close()
- self.wake_up()
- return
- self.len = ''
- if len(message) == 0:
- # it was a oneway request, do not write answer
- self.message = ''
- self.status = WAIT_LEN
- else:
- self.message = struct.pack('!i', len(message)) + message
- self.status = SEND_ANSWER
- self.wake_up()
-
- @locked
- def is_writeable(self):
- "Returns True if connection should be added to write list of select."
- return self.status == SEND_ANSWER
-
- # it's not necessary, but...
- @locked
- def is_readable(self):
- "Returns True if connection should be added to read list of select."
- return self.status in (WAIT_LEN, WAIT_MESSAGE)
-
- @locked
- def is_closed(self):
- "Returns True if connection is closed."
- return self.status == CLOSED
-
- def fileno(self):
- "Returns the file descriptor of the associated socket."
- return self.socket.fileno()
-
- def close(self):
- "Closes connection"
- self.status = CLOSED
- self.socket.close()
-
-class TNonblockingServer:
- """Non-blocking server."""
- def __init__(self, processor, lsocket, inputProtocolFactory=None,
- outputProtocolFactory=None, threads=10):
- self.processor = processor
- self.socket = lsocket
- self.in_protocol = inputProtocolFactory or TBinaryProtocolFactory()
- self.out_protocol = outputProtocolFactory or self.in_protocol
- self.threads = int(threads)
- self.clients = {}
- self.tasks = Queue.Queue()
- self._read, self._write = socket.socketpair()
- self.prepared = False
-
- def setNumThreads(self, num):
- """Set the number of worker threads that should be created."""
- # implement ThreadPool interface
- assert not self.prepared, "You can't change number of threads for working server"
- self.threads = num
-
- def prepare(self):
- """Prepares server for serve requests."""
- self.socket.listen()
- for _ in xrange(self.threads):
- thread = Worker(self.tasks)
- thread.setDaemon(True)
- thread.start()
- self.prepared = True
-
- def wake_up(self):
- """Wake up main thread.
-
- The server usualy waits in select call in we should terminate one.
- The simplest way is using socketpair.
-
- Select always wait to read from the first socket of socketpair.
-
- In this case, we can just write anything to the second socket from
- socketpair."""
- self._write.send('1')
-
- def _select(self):
- """Does select on open connections."""
- readable = [self.socket.handle.fileno(), self._read.fileno()]
- writable = []
- for i, connection in self.clients.items():
- if connection.is_readable():
- readable.append(connection.fileno())
- if connection.is_writeable():
- writable.append(connection.fileno())
- if connection.is_closed():
- del self.clients[i]
- return select.select(readable, writable, readable)
-
- def handle(self):
- """Handle requests.
-
- WARNING! You must call prepare BEFORE calling handle.
- """
- assert self.prepared, "You have to call prepare before handle"
- rset, wset, xset = self._select()
- for readable in rset:
- if readable == self._read.fileno():
- # don't care about the data, just clear the readable flag
- self._read.recv(1024)
- elif readable == self.socket.handle.fileno():
- client = self.socket.accept().handle
- self.clients[client.fileno()] = Connection(client, self.wake_up)
- else:
- connection = self.clients[readable]
- connection.read()
- if connection.status == WAIT_PROCESS:
- itransport = TTransport.TMemoryBuffer(connection.message)
- otransport = TTransport.TMemoryBuffer()
- iprot = self.in_protocol.getProtocol(itransport)
- oprot = self.out_protocol.getProtocol(otransport)
- self.tasks.put([self.processor, iprot, oprot,
- otransport, connection.ready])
- for writeable in wset:
- self.clients[writeable].write()
- for oob in xset:
- self.clients[oob].close()
- del self.clients[oob]
-
- def close(self):
- """Closes the server."""
- for _ in xrange(self.threads):
- self.tasks.put([None, None, None, None, None])
- self.socket.close()
- self.prepared = False
-
- def serve(self):
- """Serve forever."""
- self.prepare()
- while True:
- self.handle()
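
Usage sketch for the non-blocking server removed above; note that clients must speak TFramedTransport, since Connection reads a 4-byte frame length first. NoopProcessor again stands in for generated bindings and the port is arbitrary:

    from thrift.server.TNonblockingServer import TNonblockingServer
    from thrift.transport import TSocket

    class NoopProcessor(object):
        # stand-in for a generated Processor(handler)
        def process(self, iprot, oprot):
            pass

    server = TNonblockingServer(NoopProcessor(), TSocket.TServerSocket(port=9090), threads=4)
    server.serve()  # prepare() starts the worker threads, then handle() loops over select()
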
diff --git a/module/lib/thrift/server/TProcessPoolServer.py b/module/lib/thrift/server/TProcessPoolServer.py
deleted file mode 100644
index 7ed814a88..000000000
--- a/module/lib/thrift/server/TProcessPoolServer.py
+++ /dev/null
@@ -1,125 +0,0 @@
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied. See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-
-
-import logging
-from multiprocessing import Process, Value, Condition, reduction
-
-from TServer import TServer
-from thrift.transport.TTransport import TTransportException
-
-class TProcessPoolServer(TServer):
-
- """
- Server with a fixed-size pool of worker subprocesses which service requests.
- Note that if you need shared state between the handlers, managing it is up to you!
- Written by Dvir Volk, doat.com
- """
-
- def __init__(self, * args):
- TServer.__init__(self, *args)
- self.numWorkers = 10
- self.workers = []
- self.isRunning = Value('b', False)
- self.stopCondition = Condition()
- self.postForkCallback = None
-
- def setPostForkCallback(self, callback):
- if not callable(callback):
- raise TypeError("This is not a callback!")
- self.postForkCallback = callback
-
- def setNumWorkers(self, num):
- """Set the number of worker threads that should be created"""
- self.numWorkers = num
-
- def workerProcess(self):
- """Loop around getting clients from the shared queue and process them."""
-
- if self.postForkCallback:
- self.postForkCallback()
-
- while self.isRunning.value == True:
- try:
- client = self.serverTransport.accept()
- self.serveClient(client)
- except (KeyboardInterrupt, SystemExit):
- return 0
- except Exception, x:
- logging.exception(x)
-
- def serveClient(self, client):
- """Process input/output from a client for as long as possible"""
- itrans = self.inputTransportFactory.getTransport(client)
- otrans = self.outputTransportFactory.getTransport(client)
- iprot = self.inputProtocolFactory.getProtocol(itrans)
- oprot = self.outputProtocolFactory.getProtocol(otrans)
-
- try:
- while True:
- self.processor.process(iprot, oprot)
- except TTransportException, tx:
- pass
- except Exception, x:
- logging.exception(x)
-
- itrans.close()
- otrans.close()
-
-
- def serve(self):
- """Start a fixed number of worker threads and put client into a queue"""
-
- #this is a shared state that can tell the workers to exit when set as false
- self.isRunning.value = True
-
- #first bind and listen to the port
- self.serverTransport.listen()
-
- #fork the children
- for i in range(self.numWorkers):
- try:
- w = Process(target=self.workerProcess)
- w.daemon = True
- w.start()
- self.workers.append(w)
- except Exception, x:
- logging.exception(x)
-
- #wait until the condition is set by stop()
-
- while True:
-
- self.stopCondition.acquire()
- try:
- self.stopCondition.wait()
- break
- except (SystemExit, KeyboardInterrupt):
- break
- except Exception, x:
- logging.exception(x)
-
- self.isRunning.value = False
-
- def stop(self):
- self.isRunning.value = False
- self.stopCondition.acquire()
- self.stopCondition.notify()
- self.stopCondition.release()
-
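
The process-pool variant forks worker processes that each accept() on the shared listening socket. A minimal sketch under the same stub-processor assumption:

    from thrift.protocol import TBinaryProtocol
    from thrift.server.TProcessPoolServer import TProcessPoolServer
    from thrift.transport import TSocket, TTransport

    class NoopProcessor(object):
        # stand-in for a generated Processor(handler)
        def process(self, iprot, oprot):
            pass

    server = TProcessPoolServer(NoopProcessor(),
                                TSocket.TServerSocket(port=9090),
                                TTransport.TBufferedTransportFactory(),
                                TBinaryProtocol.TBinaryProtocolFactory())
    server.setNumWorkers(4)
    server.serve()  # forks the workers, then blocks until stop() is called
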
diff --git a/module/lib/thrift/server/TServer.py b/module/lib/thrift/server/TServer.py
deleted file mode 100644
index 8456e2d40..000000000
--- a/module/lib/thrift/server/TServer.py
+++ /dev/null
@@ -1,274 +0,0 @@
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied. See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-
-import logging
-import sys
-import os
-import traceback
-import threading
-import Queue
-
-from thrift.Thrift import TProcessor
-from thrift.transport import TTransport
-from thrift.protocol import TBinaryProtocol
-
-class TServer:
-
- """Base interface for a server, which must have a serve method."""
-
- """ 3 constructors for all servers:
- 1) (processor, serverTransport)
- 2) (processor, serverTransport, transportFactory, protocolFactory)
- 3) (processor, serverTransport,
- inputTransportFactory, outputTransportFactory,
- inputProtocolFactory, outputProtocolFactory)"""
- def __init__(self, *args):
- if (len(args) == 2):
- self.__initArgs__(args[0], args[1],
- TTransport.TTransportFactoryBase(),
- TTransport.TTransportFactoryBase(),
- TBinaryProtocol.TBinaryProtocolFactory(),
- TBinaryProtocol.TBinaryProtocolFactory())
- elif (len(args) == 4):
- self.__initArgs__(args[0], args[1], args[2], args[2], args[3], args[3])
- elif (len(args) == 6):
- self.__initArgs__(args[0], args[1], args[2], args[3], args[4], args[5])
-
- def __initArgs__(self, processor, serverTransport,
- inputTransportFactory, outputTransportFactory,
- inputProtocolFactory, outputProtocolFactory):
- self.processor = processor
- self.serverTransport = serverTransport
- self.inputTransportFactory = inputTransportFactory
- self.outputTransportFactory = outputTransportFactory
- self.inputProtocolFactory = inputProtocolFactory
- self.outputProtocolFactory = outputProtocolFactory
-
- def serve(self):
- pass
-
-class TSimpleServer(TServer):
-
- """Simple single-threaded server that just pumps around one transport."""
-
- def __init__(self, *args):
- TServer.__init__(self, *args)
-
- def serve(self):
- self.serverTransport.listen()
- while True:
- client = self.serverTransport.accept()
- itrans = self.inputTransportFactory.getTransport(client)
- otrans = self.outputTransportFactory.getTransport(client)
- iprot = self.inputProtocolFactory.getProtocol(itrans)
- oprot = self.outputProtocolFactory.getProtocol(otrans)
- try:
- while True:
- self.processor.process(iprot, oprot)
- except TTransport.TTransportException, tx:
- pass
- except Exception, x:
- logging.exception(x)
-
- itrans.close()
- otrans.close()
-
-class TThreadedServer(TServer):
-
- """Threaded server that spawns a new thread per each connection."""
-
- def __init__(self, *args, **kwargs):
- TServer.__init__(self, *args)
- self.daemon = kwargs.get("daemon", False)
-
- def serve(self):
- self.serverTransport.listen()
- while True:
- try:
- client = self.serverTransport.accept()
- t = threading.Thread(target = self.handle, args=(client,))
- t.setDaemon(self.daemon)
- t.start()
- except KeyboardInterrupt:
- raise
- except Exception, x:
- logging.exception(x)
-
- def handle(self, client):
- itrans = self.inputTransportFactory.getTransport(client)
- otrans = self.outputTransportFactory.getTransport(client)
- iprot = self.inputProtocolFactory.getProtocol(itrans)
- oprot = self.outputProtocolFactory.getProtocol(otrans)
- try:
- while True:
- self.processor.process(iprot, oprot)
- except TTransport.TTransportException, tx:
- pass
- except Exception, x:
- logging.exception(x)
-
- itrans.close()
- otrans.close()
-
-class TThreadPoolServer(TServer):
-
- """Server with a fixed size pool of threads which service requests."""
-
- def __init__(self, *args, **kwargs):
- TServer.__init__(self, *args)
- self.clients = Queue.Queue()
- self.threads = 10
- self.daemon = kwargs.get("daemon", False)
-
- def setNumThreads(self, num):
- """Set the number of worker threads that should be created"""
- self.threads = num
-
- def serveThread(self):
- """Loop around getting clients from the shared queue and process them."""
- while True:
- try:
- client = self.clients.get()
- self.serveClient(client)
- except Exception, x:
- logging.exception(x)
-
- def serveClient(self, client):
- """Process input/output from a client for as long as possible"""
- itrans = self.inputTransportFactory.getTransport(client)
- otrans = self.outputTransportFactory.getTransport(client)
- iprot = self.inputProtocolFactory.getProtocol(itrans)
- oprot = self.outputProtocolFactory.getProtocol(otrans)
- try:
- while True:
- self.processor.process(iprot, oprot)
- except TTransport.TTransportException, tx:
- pass
- except Exception, x:
- logging.exception(x)
-
- itrans.close()
- otrans.close()
-
- def serve(self):
- """Start a fixed number of worker threads and put client into a queue"""
- for i in range(self.threads):
- try:
- t = threading.Thread(target = self.serveThread)
- t.setDaemon(self.daemon)
- t.start()
- except Exception, x:
- logging.exception(x)
-
- # Pump the socket for clients
- self.serverTransport.listen()
- while True:
- try:
- client = self.serverTransport.accept()
- self.clients.put(client)
- except Exception, x:
- logging.exception(x)
-
-
-class TForkingServer(TServer):
-
- """A Thrift server that forks a new process for each request"""
- """
- This is more scalable than the threaded server as it does not cause
- GIL contention.
-
- Note that this has different semantics from the threading server.
- Specifically, updates to shared variables will no longer be shared.
- It will also not work on windows.
-
- This code is heavily inspired by SocketServer.ForkingMixIn in the
- Python stdlib.
- """
-
- def __init__(self, *args):
- TServer.__init__(self, *args)
- self.children = []
-
- def serve(self):
- def try_close(file):
- try:
- file.close()
- except IOError, e:
- logging.warning(e, exc_info=True)
-
-
- self.serverTransport.listen()
- while True:
- client = self.serverTransport.accept()
- try:
- pid = os.fork()
-
- if pid: # parent
- # add before collect, otherwise you race w/ waitpid
- self.children.append(pid)
- self.collect_children()
-
- # Parent must close socket or the connection may not get
- # closed promptly
- itrans = self.inputTransportFactory.getTransport(client)
- otrans = self.outputTransportFactory.getTransport(client)
- try_close(itrans)
- try_close(otrans)
- else:
- itrans = self.inputTransportFactory.getTransport(client)
- otrans = self.outputTransportFactory.getTransport(client)
-
- iprot = self.inputProtocolFactory.getProtocol(itrans)
- oprot = self.outputProtocolFactory.getProtocol(otrans)
-
- ecode = 0
- try:
- try:
- while True:
- self.processor.process(iprot, oprot)
- except TTransport.TTransportException, tx:
- pass
- except Exception, e:
- logging.exception(e)
- ecode = 1
- finally:
- try_close(itrans)
- try_close(otrans)
-
- os._exit(ecode)
-
- except TTransport.TTransportException, tx:
- pass
- except Exception, x:
- logging.exception(x)
-
-
- def collect_children(self):
- while self.children:
- try:
- pid, status = os.waitpid(0, os.WNOHANG)
- except os.error:
- pid = None
-
- if pid:
- self.children.remove(pid)
- else:
- break
-
-
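
The classic blocking servers above all follow the same constructor forms; for example, a threaded server with buffered transports and the binary protocol (stub processor, arbitrary port):

    from thrift.protocol import TBinaryProtocol
    from thrift.server import TServer
    from thrift.transport import TSocket, TTransport

    class NoopProcessor(object):
        # stand-in for a generated Processor(handler)
        def process(self, iprot, oprot):
            pass

    server = TServer.TThreadedServer(NoopProcessor(),
                                     TSocket.TServerSocket(port=9090),
                                     TTransport.TBufferedTransportFactory(),
                                     TBinaryProtocol.TBinaryProtocolFactory(),
                                     daemon=True)
    server.serve()  # one thread per accepted connection
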
diff --git a/module/lib/thrift/server/__init__.py b/module/lib/thrift/server/__init__.py
deleted file mode 100644
index 1bf6e254e..000000000
--- a/module/lib/thrift/server/__init__.py
+++ /dev/null
@@ -1,20 +0,0 @@
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied. See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-
-__all__ = ['TServer', 'TNonblockingServer']
diff --git a/module/lib/thrift/transport/THttpClient.py b/module/lib/thrift/transport/THttpClient.py
deleted file mode 100644
index 50269785c..000000000
--- a/module/lib/thrift/transport/THttpClient.py
+++ /dev/null
@@ -1,126 +0,0 @@
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied. See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-
-from TTransport import *
-from cStringIO import StringIO
-
-import urlparse
-import httplib
-import warnings
-import socket
-
-class THttpClient(TTransportBase):
-
- """Http implementation of TTransport base."""
-
- def __init__(self, uri_or_host, port=None, path=None):
- """THttpClient supports two different types constructor parameters.
-
- THttpClient(host, port, path) - deprecated
- THttpClient(uri)
-
- Only the second supports https."""
-
- if port is not None:
- warnings.warn("Please use the THttpClient('http://host:port/path') syntax", DeprecationWarning, stacklevel=2)
- self.host = uri_or_host
- self.port = port
- assert path
- self.path = path
- self.scheme = 'http'
- else:
- parsed = urlparse.urlparse(uri_or_host)
- self.scheme = parsed.scheme
- assert self.scheme in ('http', 'https')
- if self.scheme == 'http':
- self.port = parsed.port or httplib.HTTP_PORT
- elif self.scheme == 'https':
- self.port = parsed.port or httplib.HTTPS_PORT
- self.host = parsed.hostname
- self.path = parsed.path
- if parsed.query:
- self.path += '?%s' % parsed.query
- self.__wbuf = StringIO()
- self.__http = None
- self.__timeout = None
-
- def open(self):
- if self.scheme == 'http':
- self.__http = httplib.HTTP(self.host, self.port)
- else:
- self.__http = httplib.HTTPS(self.host, self.port)
-
- def close(self):
- self.__http.close()
- self.__http = None
-
- def isOpen(self):
- return self.__http != None
-
- def setTimeout(self, ms):
- if not hasattr(socket, 'getdefaulttimeout'):
- raise NotImplementedError
-
- if ms is None:
- self.__timeout = None
- else:
- self.__timeout = ms/1000.0
-
- def read(self, sz):
- return self.__http.file.read(sz)
-
- def write(self, buf):
- self.__wbuf.write(buf)
-
- def __withTimeout(f):
- def _f(*args, **kwargs):
- orig_timeout = socket.getdefaulttimeout()
- socket.setdefaulttimeout(args[0].__timeout)
- result = f(*args, **kwargs)
- socket.setdefaulttimeout(orig_timeout)
- return result
- return _f
-
- def flush(self):
- if self.isOpen():
- self.close()
- self.open()
-
- # Pull data out of buffer
- data = self.__wbuf.getvalue()
- self.__wbuf = StringIO()
-
- # HTTP request
- self.__http.putrequest('POST', self.path)
-
- # Write headers
- self.__http.putheader('Host', self.host)
- self.__http.putheader('Content-Type', 'application/x-thrift')
- self.__http.putheader('Content-Length', str(len(data)))
- self.__http.endheaders()
-
- # Write payload
- self.__http.send(data)
-
- # Get reply to flush the request
- self.code, self.message, self.headers = self.__http.getreply()
-
- # Decorate if we know how to timeout
- if hasattr(socket, 'getdefaulttimeout'):
- flush = __withTimeout(flush)
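
On the client side, THttpClient is usually stacked under a buffered transport and a binary protocol; each flush() issues one HTTP POST. A sketch assuming a service at the given URL, with a hypothetical generated client class:

    from thrift.protocol import TBinaryProtocol
    from thrift.transport import THttpClient, TTransport

    transport = TTransport.TBufferedTransport(
        THttpClient.THttpClient('http://localhost:9090/thrift'))
    protocol = TBinaryProtocol.TBinaryProtocol(transport)
    transport.open()
    # client = ExampleService.Client(protocol)   # hypothetical generated client class
    # client.ping()
    transport.close()
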
diff --git a/module/lib/thrift/transport/TSocket.py b/module/lib/thrift/transport/TSocket.py
deleted file mode 100644
index 4e0e1874f..000000000
--- a/module/lib/thrift/transport/TSocket.py
+++ /dev/null
@@ -1,163 +0,0 @@
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied. See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-
-from TTransport import *
-import os
-import errno
-import socket
-import sys
-
-class TSocketBase(TTransportBase):
- def _resolveAddr(self):
- if self._unix_socket is not None:
- return [(socket.AF_UNIX, socket.SOCK_STREAM, None, None, self._unix_socket)]
- else:
- return socket.getaddrinfo(self.host, self.port, socket.AF_UNSPEC, socket.SOCK_STREAM, 0, socket.AI_PASSIVE | socket.AI_ADDRCONFIG)
-
- def close(self):
- if self.handle:
- self.handle.close()
- self.handle = None
-
-class TSocket(TSocketBase):
- """Socket implementation of TTransport base."""
-
- def __init__(self, host='localhost', port=9090, unix_socket=None):
- """Initialize a TSocket
-
- @param host(str) The host to connect to.
- @param port(int) The (TCP) port to connect to.
- @param unix_socket(str) The filename of a unix socket to connect to.
- (host and port will be ignored.)
- """
-
- self.host = host
- self.port = port
- self.handle = None
- self._unix_socket = unix_socket
- self._timeout = None
-
- def setHandle(self, h):
- self.handle = h
-
- def isOpen(self):
- return self.handle is not None
-
- def setTimeout(self, ms):
- if ms is None:
- self._timeout = None
- else:
- self._timeout = ms/1000.0
-
- if self.handle is not None:
- self.handle.settimeout(self._timeout)
-
- def open(self):
- try:
- res0 = self._resolveAddr()
- for res in res0:
- self.handle = socket.socket(res[0], res[1])
- self.handle.settimeout(self._timeout)
- try:
- self.handle.connect(res[4])
- except socket.error, e:
- if res is not res0[-1]:
- continue
- else:
- raise e
- break
- except socket.error, e:
- if self._unix_socket:
- message = 'Could not connect to socket %s' % self._unix_socket
- else:
- message = 'Could not connect to %s:%d' % (self.host, self.port)
- raise TTransportException(type=TTransportException.NOT_OPEN, message=message)
-
- def read(self, sz):
- try:
- buff = self.handle.recv(sz)
- except socket.error, e:
- if (e.args[0] == errno.ECONNRESET and
- (sys.platform == 'darwin' or sys.platform.startswith('freebsd'))):
- # freebsd and Mach don't follow POSIX semantic of recv
- # and fail with ECONNRESET if peer performed shutdown.
- # See corresponding comment and code in TSocket::read()
- # in lib/cpp/src/transport/TSocket.cpp.
- self.close()
- # Trigger the check to raise the END_OF_FILE exception below.
- buff = ''
- else:
- raise
- if len(buff) == 0:
- raise TTransportException(type=TTransportException.END_OF_FILE, message='TSocket read 0 bytes')
- return buff
-
- def write(self, buff):
- if not self.handle:
- raise TTransportException(type=TTransportException.NOT_OPEN, message='Transport not open')
- sent = 0
- have = len(buff)
- while sent < have:
- plus = self.handle.send(buff)
- if plus == 0:
- raise TTransportException(type=TTransportException.END_OF_FILE, message='TSocket sent 0 bytes')
- sent += plus
- buff = buff[plus:]
-
- def flush(self):
- pass
-
-class TServerSocket(TSocketBase, TServerTransportBase):
- """Socket implementation of TServerTransport base."""
-
- def __init__(self, host=None, port=9090, unix_socket=None):
- self.host = host
- self.port = port
- self._unix_socket = unix_socket
- self.handle = None
-
- def listen(self):
- res0 = self._resolveAddr()
- for res in res0:
- if res[0] is socket.AF_INET6 or res is res0[-1]:
- break
-
- # We need to remove the old unix socket if the file exists and
- # nobody is listening on it.
- if self._unix_socket:
- tmp = socket.socket(res[0], res[1])
- try:
- tmp.connect(res[4])
- except socket.error, err:
- eno, message = err.args
- if eno == errno.ECONNREFUSED:
- os.unlink(res[4])
-
- self.handle = socket.socket(res[0], res[1])
- self.handle.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
- if hasattr(self.handle, 'settimeout'):
- self.handle.settimeout(None)
- self.handle.bind(res[4])
- self.handle.listen(128)
-
- def accept(self):
- client, addr = self.handle.accept()
- result = TSocket()
- result.setHandle(client)
- return result
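
The plain socket transport pairs with the blocking servers above, or, wrapped in TFramedTransport, with TNonblockingServer. A client-side sketch assuming a Thrift server is listening on localhost:9090; the generated client class is hypothetical:

    from thrift.protocol import TBinaryProtocol
    from thrift.transport import TSocket, TTransport

    transport = TTransport.TFramedTransport(TSocket.TSocket('localhost', 9090))
    protocol = TBinaryProtocol.TBinaryProtocol(transport)
    transport.open()
    # client = ExampleService.Client(protocol)   # hypothetical generated client class
    # client.ping()
    transport.close()
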
diff --git a/module/lib/thrift/transport/TTransport.py b/module/lib/thrift/transport/TTransport.py
deleted file mode 100644
index 12e51a9bf..000000000
--- a/module/lib/thrift/transport/TTransport.py
+++ /dev/null
@@ -1,331 +0,0 @@
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied. See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-
-from cStringIO import StringIO
-from struct import pack,unpack
-from thrift.Thrift import TException
-
-class TTransportException(TException):
-
- """Custom Transport Exception class"""
-
- UNKNOWN = 0
- NOT_OPEN = 1
- ALREADY_OPEN = 2
- TIMED_OUT = 3
- END_OF_FILE = 4
-
- def __init__(self, type=UNKNOWN, message=None):
- TException.__init__(self, message)
- self.type = type
-
-class TTransportBase:
-
- """Base class for Thrift transport layer."""
-
- def isOpen(self):
- pass
-
- def open(self):
- pass
-
- def close(self):
- pass
-
- def read(self, sz):
- pass
-
- def readAll(self, sz):
- buff = ''
- have = 0
- while (have < sz):
- chunk = self.read(sz-have)
- have += len(chunk)
- buff += chunk
-
- if len(chunk) == 0:
- raise EOFError()
-
- return buff
-
- def write(self, buf):
- pass
-
- def flush(self):
- pass
-
-# This class should be thought of as an interface.
-class CReadableTransport:
- """base class for transports that are readable from C"""
-
- # TODO(dreiss): Think about changing this interface to allow us to use
- # a (Python, not c) StringIO instead, because it allows
- # you to write after reading.
-
- # NOTE: This is a classic class, so properties will NOT work
- # correctly for setting.
- @property
- def cstringio_buf(self):
- """A cStringIO buffer that contains the current chunk we are reading."""
- pass
-
- def cstringio_refill(self, partialread, reqlen):
- """Refills cstringio_buf.
-
- Returns the currently used buffer (which can but need not be the same as
- the old cstringio_buf). partialread is what the C code has read from the
- buffer, and should be inserted into the buffer before any more reads. The
- return value must be a new, not borrowed reference. Something along the
- lines of self._buf should be fine.
-
- If reqlen bytes can't be read, throw EOFError.
- """
- pass
-
-class TServerTransportBase:
-
- """Base class for Thrift server transports."""
-
- def listen(self):
- pass
-
- def accept(self):
- pass
-
- def close(self):
- pass
-
-class TTransportFactoryBase:
-
- """Base class for a Transport Factory"""
-
- def getTransport(self, trans):
- return trans
-
-class TBufferedTransportFactory:
-
- """Factory transport that builds buffered transports"""
-
- def getTransport(self, trans):
- buffered = TBufferedTransport(trans)
- return buffered
-
-
-class TBufferedTransport(TTransportBase,CReadableTransport):
-
- """Class that wraps another transport and buffers its I/O.
-
- The implementation uses a (configurable) fixed-size read buffer
- but buffers all writes until a flush is performed.
- """
-
- DEFAULT_BUFFER = 4096
-
- def __init__(self, trans, rbuf_size = DEFAULT_BUFFER):
- self.__trans = trans
- self.__wbuf = StringIO()
- self.__rbuf = StringIO("")
- self.__rbuf_size = rbuf_size
-
- def isOpen(self):
- return self.__trans.isOpen()
-
- def open(self):
- return self.__trans.open()
-
- def close(self):
- return self.__trans.close()
-
- def read(self, sz):
- ret = self.__rbuf.read(sz)
- if len(ret) != 0:
- return ret
-
- self.__rbuf = StringIO(self.__trans.read(max(sz, self.__rbuf_size)))
- return self.__rbuf.read(sz)
-
- def write(self, buf):
- self.__wbuf.write(buf)
-
- def flush(self):
- out = self.__wbuf.getvalue()
- # reset wbuf before write/flush to preserve state on underlying failure
- self.__wbuf = StringIO()
- self.__trans.write(out)
- self.__trans.flush()
-
- # Implement the CReadableTransport interface.
- @property
- def cstringio_buf(self):
- return self.__rbuf
-
- def cstringio_refill(self, partialread, reqlen):
- retstring = partialread
- if reqlen < self.__rbuf_size:
- # try to make a read of as much as we can.
- retstring += self.__trans.read(self.__rbuf_size)
-
- # but make sure we do read reqlen bytes.
- if len(retstring) < reqlen:
- retstring += self.__trans.readAll(reqlen - len(retstring))
-
- self.__rbuf = StringIO(retstring)
- return self.__rbuf
-
-class TMemoryBuffer(TTransportBase, CReadableTransport):
- """Wraps a cStringIO object as a TTransport.
-
- NOTE: Unlike the C++ version of this class, you cannot write to it
- then immediately read from it. If you want to read from a
- TMemoryBuffer, you must pass a string to the constructor.
- TODO(dreiss): Make this work like the C++ version.
- """
-
- def __init__(self, value=None):
- """value -- a value to read from for stringio
-
- If value is set, this will be a transport for reading,
- otherwise, it is for writing"""
- if value is not None:
- self._buffer = StringIO(value)
- else:
- self._buffer = StringIO()
-
- def isOpen(self):
- return not self._buffer.closed
-
- def open(self):
- pass
-
- def close(self):
- self._buffer.close()
-
- def read(self, sz):
- return self._buffer.read(sz)
-
- def write(self, buf):
- self._buffer.write(buf)
-
- def flush(self):
- pass
-
- def getvalue(self):
- return self._buffer.getvalue()
-
- # Implement the CReadableTransport interface.
- @property
- def cstringio_buf(self):
- return self._buffer
-
- def cstringio_refill(self, partialread, reqlen):
- # only one shot at reading...
- raise EOFError()
-
-class TFramedTransportFactory:
-
- """Factory transport that builds framed transports"""
-
- def getTransport(self, trans):
- framed = TFramedTransport(trans)
- return framed
-
-
-class TFramedTransport(TTransportBase, CReadableTransport):
-
- """Class that wraps another transport and frames its I/O when writing."""
-
- def __init__(self, trans,):
- self.__trans = trans
- self.__rbuf = StringIO()
- self.__wbuf = StringIO()
-
- def isOpen(self):
- return self.__trans.isOpen()
-
- def open(self):
- return self.__trans.open()
-
- def close(self):
- return self.__trans.close()
-
- def read(self, sz):
- ret = self.__rbuf.read(sz)
- if len(ret) != 0:
- return ret
-
- self.readFrame()
- return self.__rbuf.read(sz)
-
- def readFrame(self):
- buff = self.__trans.readAll(4)
- sz, = unpack('!i', buff)
- self.__rbuf = StringIO(self.__trans.readAll(sz))
-
- def write(self, buf):
- self.__wbuf.write(buf)
-
- def flush(self):
- wout = self.__wbuf.getvalue()
- wsz = len(wout)
- # reset wbuf before write/flush to preserve state on underlying failure
- self.__wbuf = StringIO()
- # N.B.: Doing this string concatenation is WAY cheaper than making
- # two separate calls to the underlying socket object. Socket writes in
- # Python turn out to be REALLY expensive, but it seems to do a pretty
- # good job of managing string buffer operations without excessive copies
- buf = pack("!i", wsz) + wout
- self.__trans.write(buf)
- self.__trans.flush()
-
- # Implement the CReadableTransport interface.
- @property
- def cstringio_buf(self):
- return self.__rbuf
-
- def cstringio_refill(self, prefix, reqlen):
- # self.__rbuf will already be empty here because fastbinary doesn't
- # ask for a refill until the previous buffer is empty. Therefore,
- # we can start reading new frames immediately.
- while len(prefix) < reqlen:
- self.readFrame()
- prefix += self.__rbuf.getvalue()
- self.__rbuf = StringIO(prefix)
- return self.__rbuf
-
-
-class TFileObjectTransport(TTransportBase):
- """Wraps a file-like object to make it work as a Thrift transport."""
-
- def __init__(self, fileobj):
- self.fileobj = fileobj
-
- def isOpen(self):
- return True
-
- def close(self):
- self.fileobj.close()
-
- def read(self, sz):
- return self.fileobj.read(sz)
-
- def write(self, buf):
- self.fileobj.write(buf)
-
- def flush(self):
- self.fileobj.flush()
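
TFramedTransport prefixes every flush() with a 4-byte big-endian length; combined with TMemoryBuffer, that framing can be inspected directly. A small self-contained sketch:

    from struct import unpack
    from thrift.transport import TTransport

    inner = TTransport.TMemoryBuffer()
    framed = TTransport.TFramedTransport(inner)
    framed.write('hello')
    framed.flush()                      # writes pack('!i', 5) + 'hello' to the inner buffer

    raw = inner.getvalue()
    size, = unpack('!i', raw[:4])
    assert size == 5 and raw[4:] == 'hello'
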
diff --git a/module/lib/thrift/transport/TTwisted.py b/module/lib/thrift/transport/TTwisted.py
deleted file mode 100644
index b6dcb4e0b..000000000
--- a/module/lib/thrift/transport/TTwisted.py
+++ /dev/null
@@ -1,219 +0,0 @@
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied. See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-from zope.interface import implements, Interface, Attribute
-from twisted.internet.protocol import Protocol, ServerFactory, ClientFactory, \
- connectionDone
-from twisted.internet import defer
-from twisted.protocols import basic
-from twisted.python import log
-from twisted.web import server, resource, http
-
-from thrift.transport import TTransport
-from cStringIO import StringIO
-
-
-class TMessageSenderTransport(TTransport.TTransportBase):
-
- def __init__(self):
- self.__wbuf = StringIO()
-
- def write(self, buf):
- self.__wbuf.write(buf)
-
- def flush(self):
- msg = self.__wbuf.getvalue()
- self.__wbuf = StringIO()
- self.sendMessage(msg)
-
- def sendMessage(self, message):
- raise NotImplementedError
-
-
-class TCallbackTransport(TMessageSenderTransport):
-
- def __init__(self, func):
- TMessageSenderTransport.__init__(self)
- self.func = func
-
- def sendMessage(self, message):
- self.func(message)
-
-
-class ThriftClientProtocol(basic.Int32StringReceiver):
-
- MAX_LENGTH = 2 ** 31 - 1
-
- def __init__(self, client_class, iprot_factory, oprot_factory=None):
- self._client_class = client_class
- self._iprot_factory = iprot_factory
- if oprot_factory is None:
- self._oprot_factory = iprot_factory
- else:
- self._oprot_factory = oprot_factory
-
- self.recv_map = {}
- self.started = defer.Deferred()
-
- def dispatch(self, msg):
- self.sendString(msg)
-
- def connectionMade(self):
- tmo = TCallbackTransport(self.dispatch)
- self.client = self._client_class(tmo, self._oprot_factory)
- self.started.callback(self.client)
-
- def connectionLost(self, reason=connectionDone):
- for k,v in self.client._reqs.iteritems():
- tex = TTransport.TTransportException(
- type=TTransport.TTransportException.END_OF_FILE,
- message='Connection closed')
- v.errback(tex)
-
- def stringReceived(self, frame):
- tr = TTransport.TMemoryBuffer(frame)
- iprot = self._iprot_factory.getProtocol(tr)
- (fname, mtype, rseqid) = iprot.readMessageBegin()
-
- try:
- method = self.recv_map[fname]
- except KeyError:
- method = getattr(self.client, 'recv_' + fname)
- self.recv_map[fname] = method
-
- method(iprot, mtype, rseqid)
-
-
-class ThriftServerProtocol(basic.Int32StringReceiver):
-
- MAX_LENGTH = 2 ** 31 - 1
-
- def dispatch(self, msg):
- self.sendString(msg)
-
- def processError(self, error):
- self.transport.loseConnection()
-
- def processOk(self, _, tmo):
- msg = tmo.getvalue()
-
- if len(msg) > 0:
- self.dispatch(msg)
-
- def stringReceived(self, frame):
- tmi = TTransport.TMemoryBuffer(frame)
- tmo = TTransport.TMemoryBuffer()
-
- iprot = self.factory.iprot_factory.getProtocol(tmi)
- oprot = self.factory.oprot_factory.getProtocol(tmo)
-
- d = self.factory.processor.process(iprot, oprot)
- d.addCallbacks(self.processOk, self.processError,
- callbackArgs=(tmo,))
-
-
-class IThriftServerFactory(Interface):
-
- processor = Attribute("Thrift processor")
-
- iprot_factory = Attribute("Input protocol factory")
-
- oprot_factory = Attribute("Output protocol factory")
-
-
-class IThriftClientFactory(Interface):
-
- client_class = Attribute("Thrift client class")
-
- iprot_factory = Attribute("Input protocol factory")
-
- oprot_factory = Attribute("Output protocol factory")
-
-
-class ThriftServerFactory(ServerFactory):
-
- implements(IThriftServerFactory)
-
- protocol = ThriftServerProtocol
-
- def __init__(self, processor, iprot_factory, oprot_factory=None):
- self.processor = processor
- self.iprot_factory = iprot_factory
- if oprot_factory is None:
- self.oprot_factory = iprot_factory
- else:
- self.oprot_factory = oprot_factory
-
-
-class ThriftClientFactory(ClientFactory):
-
- implements(IThriftClientFactory)
-
- protocol = ThriftClientProtocol
-
- def __init__(self, client_class, iprot_factory, oprot_factory=None):
- self.client_class = client_class
- self.iprot_factory = iprot_factory
- if oprot_factory is None:
- self.oprot_factory = iprot_factory
- else:
- self.oprot_factory = oprot_factory
-
- def buildProtocol(self, addr):
- p = self.protocol(self.client_class, self.iprot_factory,
- self.oprot_factory)
- p.factory = self
- return p
-
-
-class ThriftResource(resource.Resource):
-
- allowedMethods = ('POST',)
-
- def __init__(self, processor, inputProtocolFactory,
- outputProtocolFactory=None):
- resource.Resource.__init__(self)
- self.inputProtocolFactory = inputProtocolFactory
- if outputProtocolFactory is None:
- self.outputProtocolFactory = inputProtocolFactory
- else:
- self.outputProtocolFactory = outputProtocolFactory
- self.processor = processor
-
- def getChild(self, path, request):
- return self
-
- def _cbProcess(self, _, request, tmo):
- msg = tmo.getvalue()
- request.setResponseCode(http.OK)
- request.setHeader("content-type", "application/x-thrift")
- request.write(msg)
- request.finish()
-
- def render_POST(self, request):
- request.content.seek(0, 0)
- data = request.content.read()
- tmi = TTransport.TMemoryBuffer(data)
- tmo = TTransport.TMemoryBuffer()
-
- iprot = self.inputProtocolFactory.getProtocol(tmi)
- oprot = self.outputProtocolFactory.getProtocol(tmo)
-
- d = self.processor.process(iprot, oprot)
- d.addCallback(self._cbProcess, request, tmo)
- return server.NOT_DONE_YET
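
The Twisted glue exposes the service as an Int32String (framed) protocol; note that the processor handed to ThriftServerFactory must return a Deferred from process(), since stringReceived attaches callbacks to it. A minimal sketch with a stub processor and an arbitrary port:

    from twisted.internet import defer, reactor
    from thrift.protocol import TBinaryProtocol
    from thrift.transport import TTwisted

    class NoopProcessor(object):
        # stand-in for a generated, deferred-returning Processor(handler)
        def process(self, iprot, oprot):
            return defer.succeed(None)

    factory = TTwisted.ThriftServerFactory(NoopProcessor(),
                                           TBinaryProtocol.TBinaryProtocolFactory())
    reactor.listenTCP(9090, factory)
    reactor.run()
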
diff --git a/module/lib/thrift/transport/TZlibTransport.py b/module/lib/thrift/transport/TZlibTransport.py
deleted file mode 100644
index 784d4e1e0..000000000
--- a/module/lib/thrift/transport/TZlibTransport.py
+++ /dev/null
@@ -1,261 +0,0 @@
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied. See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-'''
-TZlibTransport provides a compressed transport and transport factory
-class, using the python standard library zlib module to implement
-data compression.
-'''
-
-from __future__ import division
-import zlib
-from cStringIO import StringIO
-from TTransport import TTransportBase, CReadableTransport
-
-class TZlibTransportFactory(object):
- '''
- Factory transport that builds zlib compressed transports.
-
- This factory caches the last single client/transport that it was passed
- and returns the same TZlibTransport object that was created.
-
- This caching means the TServer class will get the _same_ transport
- object for both input and output transports from this factory.
- (For non-threaded scenarios only, since the cache only holds one object)
-
- The purpose of this caching is to allocate only one TZlibTransport where
- only one is really needed (since it must have separate read/write buffers),
- and makes the statistics from getCompSavings() and getCompRatio()
- easier to understand.
- '''
-
- # class scoped cache of last transport given and zlibtransport returned
- _last_trans = None
- _last_z = None
-
- def getTransport(self, trans, compresslevel=9):
- '''Wrap a transport, trans, with the TZlibTransport
- compressed transport class, returning a new
- transport to the caller.
-
- @param compresslevel: The zlib compression level, ranging
- from 0 (no compression) to 9 (best compression). Defaults to 9.
- @type compresslevel: int
-
- This method returns a TZlibTransport which wraps the
- passed C{trans} TTransport derived instance.
- '''
- if trans == self._last_trans:
- return self._last_z
- ztrans = TZlibTransport(trans, compresslevel)
- self._last_trans = trans
- self._last_z = ztrans
- return ztrans
-
-
-class TZlibTransport(TTransportBase, CReadableTransport):
- '''
- Class that wraps a transport with zlib, compressing writes
- and decompressing reads, using the Python standard
- library zlib module.
- '''
-
- # Read buffer size for the python fastbinary C extension,
- # the TBinaryProtocolAccelerated class.
- DEFAULT_BUFFSIZE = 4096
-
- def __init__(self, trans, compresslevel=9):
- '''
- Create a new TZlibTransport, wrapping C{trans}, another
- TTransport derived object.
-
- @param trans: A thrift transport object, i.e. a TSocket() object.
- @type trans: TTransport
- @param compresslevel: The zlib compression level, ranging
- from 0 (no compression) to 9 (best compression). Default is 9.
- @type compresslevel: int
- '''
- self.__trans = trans
- self.compresslevel = compresslevel
- self.__rbuf = StringIO()
- self.__wbuf = StringIO()
- self._init_zlib()
- self._init_stats()
-
- def _reinit_buffers(self):
- '''
- Internal method to initialize/reset the internal StringIO objects
- for read and write buffers.
- '''
- self.__rbuf = StringIO()
- self.__wbuf = StringIO()
-
- def _init_stats(self):
- '''
- Internal method to reset the internal statistics counters
- for compression ratios and bandwidth savings.
- '''
- self.bytes_in = 0
- self.bytes_out = 0
- self.bytes_in_comp = 0
- self.bytes_out_comp = 0
-
- def _init_zlib(self):
- '''
- Internal method for setting up the zlib compression and
- decompression objects.
- '''
- self._zcomp_read = zlib.decompressobj()
- self._zcomp_write = zlib.compressobj(self.compresslevel)
-
- def getCompRatio(self):
- '''
- Get the current measured compression ratios (in,out) from
- this transport.
-
- Returns a tuple of:
- (inbound_compression_ratio, outbound_compression_ratio)
-
- The compression ratios are computed as:
- compressed / uncompressed
-
- E.g., data that compresses by 10x will have a ratio of: 0.10
- and data that compresses to half of its original size will
- have a ratio of 0.5
-
- None is returned if no bytes have yet been processed in
- a particular direction.
- '''
- r_percent, w_percent = (None, None)
- if self.bytes_in > 0:
- r_percent = self.bytes_in_comp / self.bytes_in
- if self.bytes_out > 0:
- w_percent = self.bytes_out_comp / self.bytes_out
- return (r_percent, w_percent)
-
- def getCompSavings(self):
- '''
- Get the current count of saved bytes due to data
- compression.
-
- Returns a tuple of:
- (inbound_saved_bytes, outbound_saved_bytes)
-
- Note: if compression is actually expanding your
- data (only likely with very tiny thrift objects), then
- the values returned will be negative.
- '''
- r_saved = self.bytes_in - self.bytes_in_comp
- w_saved = self.bytes_out - self.bytes_out_comp
- return (r_saved, w_saved)
-
- def isOpen(self):
- '''Return the underlying transport's open status'''
- return self.__trans.isOpen()
-
- def open(self):
- """Open the underlying transport"""
- self._init_stats()
- return self.__trans.open()
-
- def listen(self):
- '''Invoke the underlying transport's listen() method'''
- self.__trans.listen()
-
- def accept(self):
- '''Accept connections on the underlying transport'''
- return self.__trans.accept()
-
- def close(self):
- '''Close the underlying transport'''
- self._reinit_buffers()
- self._init_zlib()
- return self.__trans.close()
-
- def read(self, sz):
- '''
- Read up to sz bytes from the decompressed bytes buffer, and
- read from the underlying transport if the decompression
- buffer is empty.
- '''
- ret = self.__rbuf.read(sz)
- if len(ret) > 0:
- return ret
- # keep reading from transport until something comes back
- while True:
- if self.readComp(sz):
- break
- ret = self.__rbuf.read(sz)
- return ret
-
- def readComp(self, sz):
- '''
- Read compressed data from the underlying transport, then
- decompress it and append it to the internal StringIO read buffer
- '''
- zbuf = self.__trans.read(sz)
- zbuf = self._zcomp_read.unconsumed_tail + zbuf
- buf = self._zcomp_read.decompress(zbuf)
- self.bytes_in += len(zbuf)
- self.bytes_in_comp += len(buf)
- old = self.__rbuf.read()
- self.__rbuf = StringIO(old + buf)
- if len(old) + len(buf) == 0:
- return False
- return True
-
- def write(self, buf):
- '''
- Write some bytes, putting them into the internal write
- buffer for eventual compression.
- '''
- self.__wbuf.write(buf)
-
- def flush(self):
- '''
- Flush any queued up data in the write buffer and ensure the
- compression buffer is flushed out to the underlying transport
- '''
- wout = self.__wbuf.getvalue()
- if len(wout) > 0:
- zbuf = self._zcomp_write.compress(wout)
- self.bytes_out += len(wout)
- self.bytes_out_comp += len(zbuf)
- else:
- zbuf = ''
- ztail = self._zcomp_write.flush(zlib.Z_SYNC_FLUSH)
- self.bytes_out_comp += len(ztail)
- if (len(zbuf) + len(ztail)) > 0:
- self.__wbuf = StringIO()
- self.__trans.write(zbuf + ztail)
- self.__trans.flush()
-
- @property
- def cstringio_buf(self):
- '''Implement the CReadableTransport interface'''
- return self.__rbuf
-
- def cstringio_refill(self, partialread, reqlen):
- '''Implement the CReadableTransport interface for refill'''
- retstring = partialread
- if reqlen < self.DEFAULT_BUFFSIZE:
- retstring += self.read(self.DEFAULT_BUFFSIZE)
- while len(retstring) < reqlen:
- retstring += self.read(reqlen - len(retstring))
- self.__rbuf = StringIO(retstring)
- return self.__rbuf
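
For reference, the removed TZlibTransport was normally stacked between a TSocket and a protocol on the client side, with TZlibTransportFactory providing the same wrapping on the server side. The sketch below assumes the old Thrift Python package is still importable; the generated service client and the port number are placeholders, not part of this repository:

# Illustrative client-side stacking of the removed transport.
# Assumptions: the old thrift package is on the path; "ExampleService" stands
# in for generated code; the port number is arbitrary.
from thrift.transport import TSocket, TZlibTransport
from thrift.protocol import TBinaryProtocol

sock = TSocket.TSocket('localhost', 7228)               # raw TCP transport
ztrans = TZlibTransport.TZlibTransport(sock, compresslevel=9)
proto = TBinaryProtocol.TBinaryProtocol(ztrans)
# client = ExampleService.Client(proto)                 # hypothetical generated client
ztrans.open()
# ... issue RPC calls through client, then inspect compression statistics ...
print ztrans.getCompRatio()                             # (inbound, outbound) ratios, or None per direction
ztrans.close()

Because the factory caches the last transport it wrapped, a non-threaded TServer ends up with a single TZlibTransport for both input and output, which is what keeps the getCompRatio()/getCompSavings() counters meaningful.
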
diff --git a/module/lib/thrift/transport/__init__.py b/module/lib/thrift/transport/__init__.py
deleted file mode 100644
index 46e54fe6b..000000000
--- a/module/lib/thrift/transport/__init__.py
+++ /dev/null
@@ -1,20 +0,0 @@
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied. See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-
-__all__ = ['TTransport', 'TSocket', 'THttpClient','TZlibTransport']
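
The deleted package __init__ above only declared which submodules a star import exposes. As a small illustration (assuming the removed thrift.transport package layout), __all__ affects only the wildcard form; submodules left off the list, such as TTwisted, still have to be imported explicitly:

# Illustration only (assumes the removed thrift.transport package layout).
from thrift.transport import *            # binds TTransport, TSocket, THttpClient, TZlibTransport
from thrift.transport import TTwisted     # not in __all__, so it needs an explicit import
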