Diffstat (limited to 'lib/python2.7/site-packages/Twisted-12.2.0-py2.7-linux-x86_64.egg/twisted/protocols')
-rwxr-xr-x  lib/python2.7/site-packages/Twisted-12.2.0-py2.7-linux-x86_64.egg/twisted/protocols/__init__.py | 7
-rwxr-xr-x  lib/python2.7/site-packages/Twisted-12.2.0-py2.7-linux-x86_64.egg/twisted/protocols/amp.py | 2705
-rwxr-xr-x  lib/python2.7/site-packages/Twisted-12.2.0-py2.7-linux-x86_64.egg/twisted/protocols/basic.py | 939
-rwxr-xr-x  lib/python2.7/site-packages/Twisted-12.2.0-py2.7-linux-x86_64.egg/twisted/protocols/dict.py | 362
-rwxr-xr-x  lib/python2.7/site-packages/Twisted-12.2.0-py2.7-linux-x86_64.egg/twisted/protocols/finger.py | 42
-rwxr-xr-x  lib/python2.7/site-packages/Twisted-12.2.0-py2.7-linux-x86_64.egg/twisted/protocols/ftp.py | 2955
-rwxr-xr-x  lib/python2.7/site-packages/Twisted-12.2.0-py2.7-linux-x86_64.egg/twisted/protocols/gps/__init__.py | 1
-rwxr-xr-x  lib/python2.7/site-packages/Twisted-12.2.0-py2.7-linux-x86_64.egg/twisted/protocols/gps/nmea.py | 209
-rwxr-xr-x  lib/python2.7/site-packages/Twisted-12.2.0-py2.7-linux-x86_64.egg/twisted/protocols/gps/rockwell.py | 268
-rwxr-xr-x  lib/python2.7/site-packages/Twisted-12.2.0-py2.7-linux-x86_64.egg/twisted/protocols/htb.py | 297
-rwxr-xr-x  lib/python2.7/site-packages/Twisted-12.2.0-py2.7-linux-x86_64.egg/twisted/protocols/ident.py | 231
-rwxr-xr-x  lib/python2.7/site-packages/Twisted-12.2.0-py2.7-linux-x86_64.egg/twisted/protocols/loopback.py | 372
-rwxr-xr-x  lib/python2.7/site-packages/Twisted-12.2.0-py2.7-linux-x86_64.egg/twisted/protocols/memcache.py | 758
-rwxr-xr-x  lib/python2.7/site-packages/Twisted-12.2.0-py2.7-linux-x86_64.egg/twisted/protocols/mice/__init__.py | 1
-rwxr-xr-x  lib/python2.7/site-packages/Twisted-12.2.0-py2.7-linux-x86_64.egg/twisted/protocols/mice/mouseman.py | 127
-rwxr-xr-x  lib/python2.7/site-packages/Twisted-12.2.0-py2.7-linux-x86_64.egg/twisted/protocols/pcp.py | 204
-rwxr-xr-x  lib/python2.7/site-packages/Twisted-12.2.0-py2.7-linux-x86_64.egg/twisted/protocols/policies.py | 725
-rwxr-xr-x  lib/python2.7/site-packages/Twisted-12.2.0-py2.7-linux-x86_64.egg/twisted/protocols/portforward.py | 87
-rwxr-xr-x  lib/python2.7/site-packages/Twisted-12.2.0-py2.7-linux-x86_64.egg/twisted/protocols/postfix.py | 112
-rwxr-xr-x  lib/python2.7/site-packages/Twisted-12.2.0-py2.7-linux-x86_64.egg/twisted/protocols/shoutcast.py | 111
-rwxr-xr-x  lib/python2.7/site-packages/Twisted-12.2.0-py2.7-linux-x86_64.egg/twisted/protocols/sip.py | 1347
-rwxr-xr-x  lib/python2.7/site-packages/Twisted-12.2.0-py2.7-linux-x86_64.egg/twisted/protocols/socks.py | 240
-rwxr-xr-x  lib/python2.7/site-packages/Twisted-12.2.0-py2.7-linux-x86_64.egg/twisted/protocols/stateful.py | 52
-rwxr-xr-x  lib/python2.7/site-packages/Twisted-12.2.0-py2.7-linux-x86_64.egg/twisted/protocols/telnet.py | 325
-rwxr-xr-x  lib/python2.7/site-packages/Twisted-12.2.0-py2.7-linux-x86_64.egg/twisted/protocols/test/__init__.py | 6
-rwxr-xr-x  lib/python2.7/site-packages/Twisted-12.2.0-py2.7-linux-x86_64.egg/twisted/protocols/test/test_tls.py | 1499
-rwxr-xr-x  lib/python2.7/site-packages/Twisted-12.2.0-py2.7-linux-x86_64.egg/twisted/protocols/tls.py | 613
-rwxr-xr-x  lib/python2.7/site-packages/Twisted-12.2.0-py2.7-linux-x86_64.egg/twisted/protocols/wire.py | 90
28 files changed, 0 insertions, 14685 deletions
diff --git a/lib/python2.7/site-packages/Twisted-12.2.0-py2.7-linux-x86_64.egg/twisted/protocols/__init__.py b/lib/python2.7/site-packages/Twisted-12.2.0-py2.7-linux-x86_64.egg/twisted/protocols/__init__.py
deleted file mode 100755
index a0796514..00000000
--- a/lib/python2.7/site-packages/Twisted-12.2.0-py2.7-linux-x86_64.egg/twisted/protocols/__init__.py
+++ /dev/null
@@ -1,7 +0,0 @@
-# Copyright (c) Twisted Matrix Laboratories.
-# See LICENSE for details.
-
-
-"""
-Twisted Protocols: a collection of internet protocol implementations.
-"""
diff --git a/lib/python2.7/site-packages/Twisted-12.2.0-py2.7-linux-x86_64.egg/twisted/protocols/amp.py b/lib/python2.7/site-packages/Twisted-12.2.0-py2.7-linux-x86_64.egg/twisted/protocols/amp.py
deleted file mode 100755
index 72a3e7ae..00000000
--- a/lib/python2.7/site-packages/Twisted-12.2.0-py2.7-linux-x86_64.egg/twisted/protocols/amp.py
+++ /dev/null
@@ -1,2705 +0,0 @@
-# -*- test-case-name: twisted.test.test_amp -*-
-# Copyright (c) 2005 Divmod, Inc.
-# Copyright (c) Twisted Matrix Laboratories.
-# See LICENSE for details.
-
-"""
-This module implements AMP, the Asynchronous Messaging Protocol.
-
-AMP is a protocol for sending multiple asynchronous request/response pairs over
-the same connection. Requests and responses are both collections of key/value
-pairs.
-
-AMP is a very simple protocol which is not an application. This module is a
-"protocol construction kit" of sorts; it attempts to be the simplest wire-level
-implementation of Deferreds. AMP provides the following base-level features:
-
- - Asynchronous request/response handling (hence the name)
-
- - Requests and responses are both key/value pairs
-
- - Binary transfer of all data: all data is length-prefixed. Your
- application will never need to worry about quoting.
-
- - Command dispatching (like HTTP Verbs): the protocol is extensible, and
- multiple AMP sub-protocols can be grouped together easily.
-
-The protocol implementation also provides a few additional features which are
-not part of the core wire protocol, but are nevertheless very useful:
-
- - Tight TLS integration, with an included StartTLS command.
-
- - Handshaking to other protocols: because AMP has well-defined message
- boundaries and maintains all incoming and outgoing requests for you, you
- can start a connection over AMP and then switch to another protocol.
- This makes it ideal for firewall-traversal applications where you may
- have only one forwarded port but multiple applications that want to use
- it.
-
-Using AMP with Twisted is simple. Each message is a command, with a response.
-You begin by defining a command type. Commands specify their input and output
-in terms of the types that they expect to see in the request and response
-key-value pairs. Here's an example of a command that adds two integers, 'a'
-and 'b'::
-
- class Sum(amp.Command):
- arguments = [('a', amp.Integer()),
- ('b', amp.Integer())]
- response = [('total', amp.Integer())]
-
-Once you have specified a command, you need to make it part of a protocol, and
-define a responder for it. Here's a 'JustSum' protocol that includes a
-responder for our 'Sum' command::
-
- class JustSum(amp.AMP):
- def sum(self, a, b):
- total = a + b
- print 'Did a sum: %d + %d = %d' % (a, b, total)
- return {'total': total}
- Sum.responder(sum)
-
-Later, when you want to actually do a sum, the following expression will return
-a L{Deferred} which will fire with the result::
-
- ClientCreator(reactor, amp.AMP).connectTCP(...).addCallback(
- lambda p: p.callRemote(Sum, a=13, b=81)).addCallback(
- lambda result: result['total'])
-
-Command responders may also return Deferreds, causing the response to be
-sent only once the Deferred fires::
-
- class DelayedSum(amp.AMP):
- def slowSum(self, a, b):
- total = a + b
- result = defer.Deferred()
- reactor.callLater(3, result.callback, {'total': total})
- return result
- Sum.responder(slowSum)
-
-This is transparent to the caller.
-
-You can also define the propagation of specific errors in AMP. For example,
-for the slightly more complicated case of division, we might have to deal with
-division by zero::
-
- class Divide(amp.Command):
- arguments = [('numerator', amp.Integer()),
- ('denominator', amp.Integer())]
- response = [('result', amp.Float())]
- errors = {ZeroDivisionError: 'ZERO_DIVISION'}
-
-The 'errors' mapping here tells AMP that if a responder to Divide emits a
-L{ZeroDivisionError}, then the other side should be informed that an error of
-the type 'ZERO_DIVISION' has occurred. Writing a responder which takes
-advantage of this is very simple - just raise your exception normally::
-
- class JustDivide(amp.AMP):
- def divide(self, numerator, denominator):
- result = numerator / denominator
-            print 'Divided: %d / %d = %d' % (numerator, denominator, result)
- return {'result': result}
- Divide.responder(divide)
-
-On the client side, the errors mapping will be used to determine what the
-'ZERO_DIVISION' error means, and translated into an asynchronous exception,
-which can be handled normally as any L{Deferred} would be::
-
- def trapZero(result):
- result.trap(ZeroDivisionError)
- print "Divided by zero: returning INF"
- return 1e1000
- ClientCreator(reactor, amp.AMP).connectTCP(...).addCallback(
- lambda p: p.callRemote(Divide, numerator=1234,
- denominator=0)
- ).addErrback(trapZero)
-
-For a complete, runnable example of both of these commands, see the files in
-the Twisted repository::
-
- doc/core/examples/ampserver.py
- doc/core/examples/ampclient.py
-
-On the wire, AMP is a protocol which uses 2-byte lengths to prefix keys and
-values, and empty keys to separate messages::
-
- <2-byte length><key><2-byte length><value>
- <2-byte length><key><2-byte length><value>
- ...
- <2-byte length><key><2-byte length><value>
- <NUL><NUL> # Empty Key == End of Message
-
-And so on. Because it's tedious to refer to lengths and NULs constantly, the
-documentation will refer to packets as if they were newline delimited, like
-so::
-
- C: _command: sum
- C: _ask: ef639e5c892ccb54
- C: a: 13
- C: b: 81
-
- S: _answer: ef639e5c892ccb54
- S: total: 94
-
-Notes:
-
-In general, the order of keys is arbitrary. Specific uses of AMP may impose an
-ordering requirement, but unless this is specified explicitly, any ordering may
-be generated and any ordering must be accepted. This applies to the
-command-related keys I{_command} and I{_ask} as well as any other keys.
-
-Values are limited to the maximum encodable size in a 16-bit length, 65535
-bytes.
-
-Keys are limited to the maximum encodable size in an 8-bit length, 255 bytes.
-Note that we still use 2-byte lengths to encode keys. This small redundancy
-has several features:
-
- - If an implementation becomes confused and starts emitting corrupt data,
- or gets keys confused with values, many common errors will be signalled
- immediately instead of delivering obviously corrupt packets.
-
- - A single NUL will separate every key, and a double NUL separates
- messages. This provides some redundancy when debugging traffic dumps.
-
- - NULs will be present at regular intervals along the protocol, providing
- some padding for otherwise braindead C implementations of the protocol,
- so that <stdio.h> string functions will see the NUL and stop.
-
- - This makes it possible to run an AMP server on a port also used by a
- plain-text protocol, and easily distinguish between non-AMP clients (like
- web browsers) which issue non-NUL as the first byte, and AMP clients,
- which always issue NUL as the first byte.
-"""
-
-__metaclass__ = type
-
-import types, warnings
-
-from cStringIO import StringIO
-from struct import pack
-import decimal, datetime
-from itertools import count
-
-from zope.interface import Interface, implements
-
-from twisted.python.compat import set
-from twisted.python.util import unsignedID
-from twisted.python.reflect import accumulateClassDict
-from twisted.python.failure import Failure
-from twisted.python import log, filepath
-
-from twisted.internet.interfaces import IFileDescriptorReceiver
-from twisted.internet.main import CONNECTION_LOST
-from twisted.internet.error import PeerVerifyError, ConnectionLost
-from twisted.internet.error import ConnectionClosed
-from twisted.internet.defer import Deferred, maybeDeferred, fail
-from twisted.protocols.basic import Int16StringReceiver, StatefulStringProtocol
-
-try:
- from twisted.internet import ssl
-except ImportError:
- ssl = None
-
-if ssl and not ssl.supported:
- ssl = None
-
-if ssl is not None:
- from twisted.internet.ssl import CertificateOptions, Certificate, DN, KeyPair
-
-ASK = '_ask'
-ANSWER = '_answer'
-COMMAND = '_command'
-ERROR = '_error'
-ERROR_CODE = '_error_code'
-ERROR_DESCRIPTION = '_error_description'
-UNKNOWN_ERROR_CODE = 'UNKNOWN'
-UNHANDLED_ERROR_CODE = 'UNHANDLED'
-
-MAX_KEY_LENGTH = 0xff
-MAX_VALUE_LENGTH = 0xffff
-
-
-class IArgumentType(Interface):
- """
- An L{IArgumentType} can serialize a Python object into an AMP box and
- deserialize information from an AMP box back into a Python object.
-
- @since: 9.0
- """
- def fromBox(name, strings, objects, proto):
- """
- Given an argument name and an AMP box containing serialized values,
- extract one or more Python objects and add them to the C{objects}
- dictionary.
-
- @param name: The name associated with this argument. Most commonly,
- this is the key which can be used to find a serialized value in
- C{strings} and which should be used as the key in C{objects} to
- associate with a structured Python object.
- @type name: C{str}
-
- @param strings: The AMP box from which to extract one or more
- values.
- @type strings: C{dict}
-
- @param objects: The output dictionary to populate with the value for
- this argument.
- @type objects: C{dict}
-
- @param proto: The protocol instance which received the AMP box being
- interpreted. Most likely this is an instance of L{AMP}, but
- this is not guaranteed.
-
- @return: C{None}
- """
-
-
- def toBox(name, strings, objects, proto):
- """
- Given an argument name and a dictionary containing structured Python
- objects, serialize values into one or more strings and add them to
- the C{strings} dictionary.
-
- @param name: The name associated with this argument. Most commonly,
- this is the key which can be used to find an object in
- C{objects} and which should be used as the key in C{strings} to
- associate with a C{str} giving the serialized form of that
- object.
- @type name: C{str}
-
- @param strings: The AMP box into which to insert one or more
- strings.
- @type strings: C{dict}
-
- @param objects: The input dictionary from which to extract Python
- objects to serialize.
- @type objects: C{dict}
-
- @param proto: The protocol instance which will send the AMP box once
- it is fully populated. Most likely this is an instance of
- L{AMP}, but this is not guaranteed.
-
- @return: C{None}
- """
-
-
-
-class IBoxSender(Interface):
- """
- A transport which can send L{AmpBox} objects.
- """
-
- def sendBox(box):
- """
- Send an L{AmpBox}.
-
- @raise ProtocolSwitched: if the underlying protocol has been
- switched.
-
- @raise ConnectionLost: if the underlying connection has already been
- lost.
- """
-
- def unhandledError(failure):
- """
- An unhandled error occurred in response to a box. Log it
- appropriately.
-
- @param failure: a L{Failure} describing the error that occurred.
- """
-
-
-
-class IBoxReceiver(Interface):
- """
- An application object which can receive L{AmpBox} objects and dispatch them
- appropriately.
- """
-
- def startReceivingBoxes(boxSender):
- """
- The L{ampBoxReceived} method will start being called; boxes may be
- responded to by responding to the given L{IBoxSender}.
-
- @param boxSender: an L{IBoxSender} provider.
- """
-
-
- def ampBoxReceived(box):
- """
- A box was received from the transport; dispatch it appropriately.
- """
-
-
- def stopReceivingBoxes(reason):
- """
- No further boxes will be received on this connection.
-
- @type reason: L{Failure}
- """
-
-
-
-class IResponderLocator(Interface):
- """
- An application object which can look up appropriate responder methods for
- AMP commands.
- """
-
- def locateResponder(name):
- """
- Locate a responder method appropriate for the named command.
-
- @param name: the wire-level name (commandName) of the AMP command to be
- responded to.
-
- @return: a 1-argument callable that takes an L{AmpBox} with argument
- values for the given command, and returns an L{AmpBox} containing
- argument values for the named command, or a L{Deferred} that fires the
- same.
- """
-
-
-
-class AmpError(Exception):
- """
- Base class of all Amp-related exceptions.
- """
-
-
-
-class ProtocolSwitched(Exception):
- """
- Connections which have been switched to other protocols can no longer
- accept traffic at the AMP level. This is raised when you try to send it.
- """
-
-
-
-class OnlyOneTLS(AmpError):
- """
- This is an implementation limitation; TLS may only be started once per
- connection.
- """
-
-
-
-class NoEmptyBoxes(AmpError):
- """
- You can't have empty boxes on the connection. This is raised when you
- receive or attempt to send one.
- """
-
-
-
-class InvalidSignature(AmpError):
- """
- You didn't pass all the required arguments.
- """
-
-
-
-class TooLong(AmpError):
- """
- One of the protocol's length limitations was violated.
-
-    @ivar isKey: true if the string being encoded was in a key position, false if
- it was in a value position.
-
- @ivar isLocal: Was the string encoded locally, or received too long from
- the network? (It's only physically possible to encode "too long" values on
- the network for keys.)
-
- @ivar value: The string that was too long.
-
- @ivar keyName: If the string being encoded was in a value position, what
- key was it being encoded for?
- """
-
- def __init__(self, isKey, isLocal, value, keyName=None):
- AmpError.__init__(self)
- self.isKey = isKey
- self.isLocal = isLocal
- self.value = value
- self.keyName = keyName
-
-
- def __repr__(self):
- hdr = self.isKey and "key" or "value"
- if not self.isKey:
- hdr += ' ' + repr(self.keyName)
- lcl = self.isLocal and "local" or "remote"
- return "%s %s too long: %d" % (lcl, hdr, len(self.value))
-
-
-
-class BadLocalReturn(AmpError):
- """
- A bad value was returned from a local command; we were unable to coerce it.
- """
- def __init__(self, message, enclosed):
- AmpError.__init__(self)
- self.message = message
- self.enclosed = enclosed
-
-
- def __repr__(self):
- return self.message + " " + self.enclosed.getBriefTraceback()
-
- __str__ = __repr__
-
-
-
-class RemoteAmpError(AmpError):
- """
- This error indicates that something went wrong on the remote end of the
- connection, and the error was serialized and transmitted to you.
- """
- def __init__(self, errorCode, description, fatal=False, local=None):
- """Create a remote error with an error code and description.
-
- @param errorCode: the AMP error code of this error.
-
- @param description: some text to show to the user.
-
- @param fatal: a boolean, true if this error should terminate the
- connection.
-
- @param local: a local Failure, if one exists.
- """
- if local:
- localwhat = ' (local)'
- othertb = local.getBriefTraceback()
- else:
- localwhat = ''
- othertb = ''
- Exception.__init__(self, "Code<%s>%s: %s%s" % (
- errorCode, localwhat,
- description, othertb))
- self.local = local
- self.errorCode = errorCode
- self.description = description
- self.fatal = fatal
-
-
-
-class UnknownRemoteError(RemoteAmpError):
- """
- This means that an error whose type we can't identify was raised from the
- other side.
- """
- def __init__(self, description):
- errorCode = UNKNOWN_ERROR_CODE
- RemoteAmpError.__init__(self, errorCode, description)
-
-
-
-class MalformedAmpBox(AmpError):
- """
- This error indicates that the wire-level protocol was malformed.
- """
-
-
-
-class UnhandledCommand(AmpError):
- """
- A command received via amp could not be dispatched.
- """
-
-
-
-class IncompatibleVersions(AmpError):
- """
- It was impossible to negotiate a compatible version of the protocol with
- the other end of the connection.
- """
-
-
-PROTOCOL_ERRORS = {UNHANDLED_ERROR_CODE: UnhandledCommand}
-
-class AmpBox(dict):
- """
- I am a packet in the AMP protocol, much like a regular str:str dictionary.
- """
- __slots__ = [] # be like a regular dictionary, don't magically
- # acquire a __dict__...
-
-
- def copy(self):
- """
- Return another AmpBox just like me.
- """
- newBox = self.__class__()
- newBox.update(self)
- return newBox
-
-
- def serialize(self):
- """
- Convert me into a wire-encoded string.
-
- @return: a str encoded according to the rules described in the module
- docstring.
- """
- i = self.items()
- i.sort()
- L = []
- w = L.append
- for k, v in i:
- if type(k) == unicode:
- raise TypeError("Unicode key not allowed: %r" % k)
- if type(v) == unicode:
- raise TypeError(
- "Unicode value for key %r not allowed: %r" % (k, v))
- if len(k) > MAX_KEY_LENGTH:
- raise TooLong(True, True, k, None)
- if len(v) > MAX_VALUE_LENGTH:
- raise TooLong(False, True, v, k)
- for kv in k, v:
- w(pack("!H", len(kv)))
- w(kv)
- w(pack("!H", 0))
- return ''.join(L)
-
-
- def _sendTo(self, proto):
- """
-        Serialize and send this box to an Amp instance. By the time it is being
- sent, several keys are required. I must have exactly ONE of::
-
- _ask
- _answer
- _error
-
- If the '_ask' key is set, then the '_command' key must also be
- set.
-
- @param proto: an AMP instance.
- """
- proto.sendBox(self)
-
- def __repr__(self):
- return 'AmpBox(%s)' % (dict.__repr__(self),)
-
-# amp.Box => AmpBox
-
-Box = AmpBox
-
-class QuitBox(AmpBox):
- """
- I am an AmpBox that, upon being sent, terminates the connection.
- """
- __slots__ = []
-
-
- def __repr__(self):
- return 'QuitBox(**%s)' % (super(QuitBox, self).__repr__(),)
-
-
- def _sendTo(self, proto):
- """
- Immediately call loseConnection after sending.
- """
- super(QuitBox, self)._sendTo(proto)
- proto.transport.loseConnection()
-
-
-
-class _SwitchBox(AmpBox):
- """
-    Implementation detail of ProtocolSwitchCommand: I am an AmpBox which sets
- up state for the protocol to switch.
- """
-
- # DON'T set __slots__ here; we do have an attribute.
-
- def __init__(self, innerProto, **kw):
- """
- Create a _SwitchBox with the protocol to switch to after being sent.
-
- @param innerProto: the protocol instance to switch to.
- @type innerProto: an IProtocol provider.
- """
- super(_SwitchBox, self).__init__(**kw)
- self.innerProto = innerProto
-
-
- def __repr__(self):
- return '_SwitchBox(%r, **%s)' % (self.innerProto,
- dict.__repr__(self),)
-
-
- def _sendTo(self, proto):
- """
- Send me; I am the last box on the connection. All further traffic will be
- over the new protocol.
- """
- super(_SwitchBox, self)._sendTo(proto)
- proto._lockForSwitch()
- proto._switchTo(self.innerProto)
-
-
-
-class BoxDispatcher:
- """
- A L{BoxDispatcher} dispatches '_ask', '_answer', and '_error' L{AmpBox}es,
- both incoming and outgoing, to their appropriate destinations.
-
-    Outgoing commands are converted into L{Deferred}s and outgoing boxes, with
-    associated tracking state to fire those L{Deferred}s when '_answer' boxes
- come back. Incoming '_answer' and '_error' boxes are converted into
- callbacks and errbacks on those L{Deferred}s, respectively.
-
- Incoming '_ask' boxes are converted into method calls on a supplied method
- locator.
-
- @ivar _outstandingRequests: a dictionary mapping request IDs to
- L{Deferred}s which were returned for those requests.
-
- @ivar locator: an object with a L{locateResponder} method that locates a
- responder function that takes a Box and returns a result (either a Box or a
- Deferred which fires one).
-
- @ivar boxSender: an object which can send boxes, via the L{_sendBox}
- method, such as an L{AMP} instance.
- @type boxSender: L{IBoxSender}
- """
-
- implements(IBoxReceiver)
-
- _failAllReason = None
- _outstandingRequests = None
- _counter = 0L
- boxSender = None
-
- def __init__(self, locator):
- self._outstandingRequests = {}
- self.locator = locator
-
-
- def startReceivingBoxes(self, boxSender):
- """
- The given boxSender is going to start calling boxReceived on this
- L{BoxDispatcher}.
-
- @param boxSender: The L{IBoxSender} to send command responses to.
- """
- self.boxSender = boxSender
-
-
- def stopReceivingBoxes(self, reason):
- """
- No further boxes will be received here. Terminate all currently
-        outstanding command deferreds with the given reason.
- """
- self.failAllOutgoing(reason)
-
-
- def failAllOutgoing(self, reason):
- """
- Call the errback on all outstanding requests awaiting responses.
-
- @param reason: the Failure instance to pass to those errbacks.
- """
- self._failAllReason = reason
- OR = self._outstandingRequests.items()
- self._outstandingRequests = None # we can never send another request
- for key, value in OR:
- value.errback(reason)
-
-
- def _nextTag(self):
- """
- Generate protocol-local serial numbers for _ask keys.
-
- @return: a string that has not yet been used on this connection.
- """
- self._counter += 1
- return '%x' % (self._counter,)
-
-
- def _sendBoxCommand(self, command, box, requiresAnswer=True):
- """
- Send a command across the wire with the given C{amp.Box}.
-
- Mutate the given box to give it any additional keys (_command, _ask)
- required for the command and request/response machinery, then send it.
-
- If requiresAnswer is True, returns a C{Deferred} which fires when a
- response is received. The C{Deferred} is fired with an C{amp.Box} on
- success, or with an C{amp.RemoteAmpError} if an error is received.
-
- If the Deferred fails and the error is not handled by the caller of
- this method, the failure will be logged and the connection dropped.
-
- @param command: a str, the name of the command to issue.
-
- @param box: an AmpBox with the arguments for the command.
-
- @param requiresAnswer: a boolean. Defaults to True. If True, return a
- Deferred which will fire when the other side responds to this command.
- If False, return None and do not ask the other side for acknowledgement.
-
- @return: a Deferred which fires the AmpBox that holds the response to
- this command, or None, as specified by requiresAnswer.
-
- @raise ProtocolSwitched: if the protocol has been switched.
- """
- if self._failAllReason is not None:
- return fail(self._failAllReason)
- box[COMMAND] = command
- tag = self._nextTag()
- if requiresAnswer:
- box[ASK] = tag
- box._sendTo(self.boxSender)
- if requiresAnswer:
- result = self._outstandingRequests[tag] = Deferred()
- else:
- result = None
- return result
-
-
- def callRemoteString(self, command, requiresAnswer=True, **kw):
- """
- This is a low-level API, designed only for optimizing simple messages
- for which the overhead of parsing is too great.
-
- @param command: a str naming the command.
-
- @param kw: arguments to the amp box.
-
- @param requiresAnswer: a boolean. Defaults to True. If True, return a
- Deferred which will fire when the other side responds to this command.
- If False, return None and do not ask the other side for acknowledgement.
-
- @return: a Deferred which fires the AmpBox that holds the response to
- this command, or None, as specified by requiresAnswer.
- """
- box = Box(kw)
- return self._sendBoxCommand(command, box, requiresAnswer)
-
-
- def callRemote(self, commandType, *a, **kw):
- """
- This is the primary high-level API for sending messages via AMP. Invoke it
- with a command and appropriate arguments to send a message to this
- connection's peer.
-
- @param commandType: a subclass of Command.
- @type commandType: L{type}
-
- @param a: Positional (special) parameters taken by the command.
- Positional parameters will typically not be sent over the wire. The
- only command included with AMP which uses positional parameters is
- L{ProtocolSwitchCommand}, which takes the protocol that will be
- switched to as its first argument.
-
- @param kw: Keyword arguments taken by the command. These are the
- arguments declared in the command's 'arguments' attribute. They will
- be encoded and sent to the peer as arguments for the L{commandType}.
-
- @return: If L{commandType} has a C{requiresAnswer} attribute set to
- L{False}, then return L{None}. Otherwise, return a L{Deferred} which
- fires with a dictionary of objects representing the result of this
- call. Additionally, this L{Deferred} may fail with an exception
- representing a connection failure, with L{UnknownRemoteError} if the
- other end of the connection fails for an unknown reason, or with any
- error specified as a key in L{commandType}'s C{errors} dictionary.
- """
-
- # XXX this takes command subclasses and not command objects on purpose.
- # There's really no reason to have all this back-and-forth between
- # command objects and the protocol, and the extra object being created
- # (the Command instance) is pointless. Command is kind of like
- # Interface, and should be more like it.
-
- # In other words, the fact that commandType is instantiated here is an
- # implementation detail. Don't rely on it.
-
- try:
- co = commandType(*a, **kw)
- except:
- return fail()
- return co._doCommand(self)
-
-
- def unhandledError(self, failure):
- """
- This is a terminal callback called after application code has had a
- chance to quash any errors.
- """
- return self.boxSender.unhandledError(failure)
-
-
- def _answerReceived(self, box):
- """
- An AMP box was received that answered a command previously sent with
- L{callRemote}.
-
- @param box: an AmpBox with a value for its L{ANSWER} key.
- """
- question = self._outstandingRequests.pop(box[ANSWER])
- question.addErrback(self.unhandledError)
- question.callback(box)
-
-
- def _errorReceived(self, box):
- """
- An AMP box was received that answered a command previously sent with
- L{callRemote}, with an error.
-
- @param box: an L{AmpBox} with a value for its L{ERROR}, L{ERROR_CODE},
- and L{ERROR_DESCRIPTION} keys.
- """
- question = self._outstandingRequests.pop(box[ERROR])
- question.addErrback(self.unhandledError)
- errorCode = box[ERROR_CODE]
- description = box[ERROR_DESCRIPTION]
- if errorCode in PROTOCOL_ERRORS:
- exc = PROTOCOL_ERRORS[errorCode](errorCode, description)
- else:
- exc = RemoteAmpError(errorCode, description)
- question.errback(Failure(exc))
-
-
- def _commandReceived(self, box):
- """
- @param box: an L{AmpBox} with a value for its L{COMMAND} and L{ASK}
- keys.
- """
- def formatAnswer(answerBox):
- answerBox[ANSWER] = box[ASK]
- return answerBox
- def formatError(error):
- if error.check(RemoteAmpError):
- code = error.value.errorCode
- desc = error.value.description
- if error.value.fatal:
- errorBox = QuitBox()
- else:
- errorBox = AmpBox()
- else:
- errorBox = QuitBox()
- log.err(error) # here is where server-side logging happens
- # if the error isn't handled
- code = UNKNOWN_ERROR_CODE
- desc = "Unknown Error"
- errorBox[ERROR] = box[ASK]
- errorBox[ERROR_DESCRIPTION] = desc
- errorBox[ERROR_CODE] = code
- return errorBox
- deferred = self.dispatchCommand(box)
- if ASK in box:
- deferred.addCallbacks(formatAnswer, formatError)
- deferred.addCallback(self._safeEmit)
- deferred.addErrback(self.unhandledError)
-
-
- def ampBoxReceived(self, box):
- """
- An AmpBox was received, representing a command, or an answer to a
- previously issued command (either successful or erroneous). Respond to
- it according to its contents.
-
- @param box: an AmpBox
-
- @raise NoEmptyBoxes: when a box is received that does not contain an
- '_answer', '_command' / '_ask', or '_error' key; i.e. one which does not
- fit into the command / response protocol defined by AMP.
- """
- if ANSWER in box:
- self._answerReceived(box)
- elif ERROR in box:
- self._errorReceived(box)
- elif COMMAND in box:
- self._commandReceived(box)
- else:
- raise NoEmptyBoxes(box)
-
-
- def _safeEmit(self, aBox):
- """
- Emit a box, ignoring L{ProtocolSwitched} and L{ConnectionLost} errors
- which cannot be usefully handled.
- """
- try:
- aBox._sendTo(self.boxSender)
- except (ProtocolSwitched, ConnectionLost):
- pass
-
-
- def dispatchCommand(self, box):
- """
- A box with a _command key was received.
-
-        Dispatch it to a local handler and call it.
-
- @param box: an AmpBox to be dispatched.
- """
- cmd = box[COMMAND]
- responder = self.locator.locateResponder(cmd)
- if responder is None:
- return fail(RemoteAmpError(
- UNHANDLED_ERROR_CODE,
- "Unhandled Command: %r" % (cmd,),
- False,
- local=Failure(UnhandledCommand())))
- return maybeDeferred(responder, box)
-
-
-
-class CommandLocator:
- """
- A L{CommandLocator} is a collection of responders to AMP L{Command}s, with
- the help of the L{Command.responder} decorator.
- """
-
- class __metaclass__(type):
- """
- This metaclass keeps track of all of the Command.responder-decorated
- methods defined since the last CommandLocator subclass was defined. It
- assumes (usually correctly, but unfortunately not necessarily so) that
- those commands responders were all declared as methods of the class
- being defined. Note that this list can be incorrect if users use the
- Command.responder decorator outside the context of a CommandLocator
- class declaration.
-
- Command responders defined on subclasses are given precedence over
- those inherited from a base class.
-
- The Command.responder decorator explicitly cooperates with this
- metaclass.
- """
-
- _currentClassCommands = []
- def __new__(cls, name, bases, attrs):
- commands = cls._currentClassCommands[:]
- cls._currentClassCommands[:] = []
- cd = attrs['_commandDispatch'] = {}
- subcls = type.__new__(cls, name, bases, attrs)
- ancestors = list(subcls.__mro__[1:])
- ancestors.reverse()
- for ancestor in ancestors:
- cd.update(getattr(ancestor, '_commandDispatch', {}))
- for commandClass, responderFunc in commands:
- cd[commandClass.commandName] = (commandClass, responderFunc)
- if (bases and (
- subcls.lookupFunction != CommandLocator.lookupFunction)):
- def locateResponder(self, name):
- warnings.warn(
- "Override locateResponder, not lookupFunction.",
- category=PendingDeprecationWarning,
- stacklevel=2)
- return self.lookupFunction(name)
- subcls.locateResponder = locateResponder
- return subcls
-
-
- implements(IResponderLocator)
-
-
- def _wrapWithSerialization(self, aCallable, command):
- """
- Wrap aCallable with its command's argument de-serialization
- and result serialization logic.
-
- @param aCallable: a callable with a 'command' attribute, designed to be
- called with keyword arguments.
-
- @param command: the command class whose serialization to use.
-
- @return: a 1-arg callable which, when invoked with an AmpBox, will
- deserialize the argument list and invoke appropriate user code for the
- callable's command, returning a Deferred which fires with the result or
- fails with an error.
- """
- def doit(box):
- kw = command.parseArguments(box, self)
- def checkKnownErrors(error):
- key = error.trap(*command.allErrors)
- code = command.allErrors[key]
- desc = str(error.value)
- return Failure(RemoteAmpError(
- code, desc, key in command.fatalErrors, local=error))
- def makeResponseFor(objects):
- try:
- return command.makeResponse(objects, self)
- except:
- # let's helpfully log this.
- originalFailure = Failure()
- raise BadLocalReturn(
- "%r returned %r and %r could not serialize it" % (
- aCallable,
- objects,
- command),
- originalFailure)
- return maybeDeferred(aCallable, **kw).addCallback(
- makeResponseFor).addErrback(
- checkKnownErrors)
- return doit
-
-
- def lookupFunction(self, name):
- """
- Deprecated synonym for L{locateResponder}
- """
- if self.__class__.lookupFunction != CommandLocator.lookupFunction:
- return CommandLocator.locateResponder(self, name)
- else:
- warnings.warn("Call locateResponder, not lookupFunction.",
- category=PendingDeprecationWarning,
- stacklevel=2)
- return self.locateResponder(name)
-
-
- def locateResponder(self, name):
- """
- Locate a callable to invoke when executing the named command.
-
- @param name: the normalized name (from the wire) of the command.
-
- @return: a 1-argument function that takes a Box and returns a box or a
- Deferred which fires a Box, for handling the command identified by the
- given name, or None, if no appropriate responder can be found.
- """
- # Try to find a high-level method to invoke, and if we can't find one,
- # fall back to a low-level one.
- cd = self._commandDispatch
- if name in cd:
- commandClass, responderFunc = cd[name]
- responderMethod = types.MethodType(
- responderFunc, self, self.__class__)
- return self._wrapWithSerialization(responderMethod, commandClass)
-
-
-
-class SimpleStringLocator(object):
- """
- Implement the L{locateResponder} method to do simple, string-based
- dispatch.
- """
-
- implements(IResponderLocator)
-
- baseDispatchPrefix = 'amp_'
-
- def locateResponder(self, name):
- """
- Locate a callable to invoke when executing the named command.
-
- @return: a function with the name C{"amp_" + name} on L{self}, or None
- if no such function exists. This function will then be called with the
- L{AmpBox} itself as an argument.
-
- @param name: the normalized name (from the wire) of the command.
- """
- fName = self.baseDispatchPrefix + (name.upper())
- return getattr(self, fName, None)
-
-
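As a brief illustration of the dispatch rule above (HelloLocator and the 'hello' command are hypothetical, not part of this module): a subclass that wants to answer a command named 'hello' defines a method called amp_HELLO, which is called with the received AmpBox and returns a box of response strings::

    class HelloLocator(SimpleStringLocator):
        def amp_HELLO(self, box):
            # Called with the received AmpBox itself; the returned box
            # supplies the string values for the response.
            return AmpBox(greeting='hello, ' + box.get('name', 'world'))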
-
-PYTHON_KEYWORDS = [
- 'and', 'del', 'for', 'is', 'raise', 'assert', 'elif', 'from', 'lambda',
- 'return', 'break', 'else', 'global', 'not', 'try', 'class', 'except',
- 'if', 'or', 'while', 'continue', 'exec', 'import', 'pass', 'yield',
- 'def', 'finally', 'in', 'print']
-
-
-
-def _wireNameToPythonIdentifier(key):
- """
- (Private) Normalize an argument name from the wire for use with Python
- code. If the return value is going to be a python keyword it will be
- capitalized. If it contains any dashes they will be replaced with
- underscores.
-
- The rationale behind this method is that AMP should be an inherently
- multi-language protocol, so message keys may contain all manner of bizarre
- bytes. This is not a complete solution; there are still forms of arguments
- that this implementation will be unable to parse. However, Python
- identifiers share a huge raft of properties with identifiers from many
- other languages, so this is a 'good enough' effort for now. We deal
- explicitly with dashes because that is the most likely departure: Lisps
- commonly use dashes to separate method names, so protocols initially
- implemented in a lisp amp dialect may use dashes in argument or command
- names.
-
- @param key: a str, looking something like 'foo-bar-baz' or 'from'
-
- @return: a str which is a valid python identifier, looking something like
- 'foo_bar_baz' or 'From'.
- """
- lkey = key.replace("-", "_")
- if lkey in PYTHON_KEYWORDS:
- return lkey.title()
- return lkey
-
-
-
-class Argument:
- """
- Base-class of all objects that take values from Amp packets and convert
- them into objects for Python functions.
-
- This implementation of L{IArgumentType} provides several higher-level
- hooks for subclasses to override. See L{toString} and L{fromString}
- which will be used to define the behavior of L{IArgumentType.toBox} and
- L{IArgumentType.fromBox}, respectively.
- """
- implements(IArgumentType)
-
- optional = False
-
-
- def __init__(self, optional=False):
- """
- Create an Argument.
-
- @param optional: a boolean indicating whether this argument can be
- omitted in the protocol.
- """
- self.optional = optional
-
-
- def retrieve(self, d, name, proto):
- """
- Retrieve the given key from the given dictionary, removing it if found.
-
- @param d: a dictionary.
-
- @param name: a key in L{d}.
-
- @param proto: an instance of an AMP.
-
- @raise KeyError: if I am not optional and no value was found.
-
- @return: d[name].
- """
- if self.optional:
- value = d.get(name)
- if value is not None:
- del d[name]
- else:
- value = d.pop(name)
- return value
-
-
- def fromBox(self, name, strings, objects, proto):
- """
-        Populate an 'out' dictionary, mapping names to Python values
-        decoded from an 'in' AmpBox that maps argument names to string values.
-
- @param name: the argument name to retrieve
- @type name: str
-
- @param strings: The AmpBox to read string(s) from, a mapping of
- argument names to string values.
- @type strings: AmpBox
-
- @param objects: The dictionary to write object(s) to, a mapping of
- names to Python objects.
- @type objects: dict
-
- @param proto: an AMP instance.
- """
- st = self.retrieve(strings, name, proto)
- nk = _wireNameToPythonIdentifier(name)
- if self.optional and st is None:
- objects[nk] = None
- else:
- objects[nk] = self.fromStringProto(st, proto)
-
-
- def toBox(self, name, strings, objects, proto):
- """
- Populate an 'out' AmpBox with strings encoded from an 'in' dictionary
- mapping names to Python values.
-
- @param name: the argument name to retrieve
- @type name: str
-
- @param strings: The AmpBox to write string(s) to, a mapping of
- argument names to string values.
- @type strings: AmpBox
-
- @param objects: The dictionary to read object(s) from, a mapping of
- names to Python objects.
-
- @type objects: dict
-
- @param proto: the protocol we are converting for.
- @type proto: AMP
- """
- obj = self.retrieve(objects, _wireNameToPythonIdentifier(name), proto)
- if self.optional and obj is None:
- # strings[name] = None
- pass
- else:
- strings[name] = self.toStringProto(obj, proto)
-
-
- def fromStringProto(self, inString, proto):
- """
- Convert a string to a Python value.
-
- @param inString: the string to convert.
-
- @param proto: the protocol we are converting for.
- @type proto: AMP
-
- @return: a Python object.
- """
- return self.fromString(inString)
-
-
- def toStringProto(self, inObject, proto):
- """
- Convert a Python object to a string.
-
- @param inObject: the object to convert.
-
- @param proto: the protocol we are converting for.
- @type proto: AMP
- """
- return self.toString(inObject)
-
-
- def fromString(self, inString):
- """
- Convert a string to a Python object. Subclasses must implement this.
-
- @param inString: the string to convert.
- @type inString: str
-
- @return: the decoded value from inString
- """
-
-
- def toString(self, inObject):
- """
- Convert a Python object into a string for passing over the network.
-
- @param inObject: an object of the type that this Argument is intended
- to deal with.
-
- @return: the wire encoding of inObject
- @rtype: str
- """
-
-
-
-class Integer(Argument):
- """
-    Encode integer values of any size on the wire as their string
- representation.
-
- Example: C{123} becomes C{"123"}
- """
- fromString = int
- def toString(self, inObject):
- return str(int(inObject))
-
-
-
-class String(Argument):
- """
- Don't do any conversion at all; just pass through 'str'.
- """
- def toString(self, inObject):
- return inObject
-
-
- def fromString(self, inString):
- return inString
-
-
-
-class Float(Argument):
- """
- Encode floating-point values on the wire as their repr.
- """
- fromString = float
- toString = repr
-
-
-
-class Boolean(Argument):
- """
- Encode True or False as "True" or "False" on the wire.
- """
- def fromString(self, inString):
- if inString == 'True':
- return True
- elif inString == 'False':
- return False
- else:
- raise TypeError("Bad boolean value: %r" % (inString,))
-
-
- def toString(self, inObject):
- if inObject:
- return 'True'
- else:
- return 'False'
-
-
-
-class Unicode(String):
- """
- Encode a unicode string on the wire as UTF-8.
- """
-
- def toString(self, inObject):
- # assert isinstance(inObject, unicode)
- return String.toString(self, inObject.encode('utf-8'))
-
-
- def fromString(self, inString):
- # assert isinstance(inString, str)
- return String.fromString(self, inString).decode('utf-8')
-
-
-
-class Path(Unicode):
- """
- Encode and decode L{filepath.FilePath} instances as paths on the wire.
-
- This is really intended for use with subprocess communication tools:
- exchanging pathnames on different machines over a network is not generally
- meaningful, but neither is it disallowed; you can use this to communicate
- about NFS paths, for example.
- """
- def fromString(self, inString):
- return filepath.FilePath(Unicode.fromString(self, inString))
-
-
- def toString(self, inObject):
- return Unicode.toString(self, inObject.path)
-
-
-
-class ListOf(Argument):
- """
- Encode and decode lists of instances of a single other argument type.
-
- For example, if you want to pass::
-
- [3, 7, 9, 15]
-
- You can create an argument like this::
-
- ListOf(Integer())
-
- The serialized form of the entire list is subject to the limit imposed by
- L{MAX_VALUE_LENGTH}. List elements are represented as 16-bit length
- prefixed strings. The argument type passed to the L{ListOf} initializer is
- responsible for producing the serialized form of each element.
-
- @ivar elementType: The L{Argument} instance used to encode and decode list
-        elements (note, not an arbitrary L{IArgumentType} implementation:
- arguments must be implemented using only the C{fromString} and
- C{toString} methods, not the C{fromBox} and C{toBox} methods).
-
- @param optional: a boolean indicating whether this argument can be
- omitted in the protocol.
-
- @since: 10.0
- """
- def __init__(self, elementType, optional=False):
- self.elementType = elementType
- Argument.__init__(self, optional)
-
-
- def fromString(self, inString):
- """
- Convert the serialized form of a list of instances of some type back
- into that list.
- """
- strings = []
- parser = Int16StringReceiver()
- parser.stringReceived = strings.append
- parser.dataReceived(inString)
- return map(self.elementType.fromString, strings)
-
-
- def toString(self, inObject):
- """
- Serialize the given list of objects to a single string.
- """
- strings = []
- for obj in inObject:
- serialized = self.elementType.toString(obj)
- strings.append(pack('!H', len(serialized)))
- strings.append(serialized)
- return ''.join(strings)
-
-
-
-class AmpList(Argument):
- """
- Convert a list of dictionaries into a list of AMP boxes on the wire.
-
- For example, if you want to pass::
-
- [{'a': 7, 'b': u'hello'}, {'a': 9, 'b': u'goodbye'}]
-
- You might use an AmpList like this in your arguments or response list::
-
- AmpList([('a', Integer()),
- ('b', Unicode())])
- """
- def __init__(self, subargs, optional=False):
- """
- Create an AmpList.
-
- @param subargs: a list of 2-tuples of ('name', argument) describing the
- schema of the dictionaries in the sequence of amp boxes.
-
- @param optional: a boolean indicating whether this argument can be
- omitted in the protocol.
- """
- self.subargs = subargs
- Argument.__init__(self, optional)
-
-
- def fromStringProto(self, inString, proto):
- boxes = parseString(inString)
- values = [_stringsToObjects(box, self.subargs, proto)
- for box in boxes]
- return values
-
-
- def toStringProto(self, inObject, proto):
- return ''.join([_objectsToStrings(
- objects, self.subargs, Box(), proto
- ).serialize() for objects in inObject])
-
-
-
-class Descriptor(Integer):
- """
- Encode and decode file descriptors for exchange over a UNIX domain socket.
-
- This argument type requires an AMP connection set up over an
- L{IUNIXTransport<twisted.internet.interfaces.IUNIXTransport>} provider (for
- example, the kind of connection created by
- L{IReactorUNIX.connectUNIX<twisted.internet.interfaces.IReactorUNIX.connectUNIX>}
- and L{UNIXClientEndpoint<twisted.internet.endpoints.UNIXClientEndpoint>}).
-
- There is no correspondence between the integer value of the file descriptor
- on the sending and receiving sides, therefore an alternate approach is taken
- to matching up received descriptors with particular L{Descriptor}
- parameters. The argument is encoded to an ordinal (unique per connection)
- for inclusion in the AMP command or response box. The descriptor itself is
- sent using
- L{IUNIXTransport.sendFileDescriptor<twisted.internet.interfaces.IUNIXTransport.sendFileDescriptor>}.
- The receiver uses the order in which file descriptors are received and the
- ordinal value to come up with the received copy of the descriptor.
- """
- def fromStringProto(self, inString, proto):
- """
- Take a unique identifier associated with a file descriptor which must
- have been received by now and use it to look up that descriptor in a
- dictionary where they are kept.
-
- @param inString: The base representation (as a byte string) of an
- ordinal indicating which file descriptor corresponds to this usage
- of this argument.
- @type inString: C{str}
-
- @param proto: The protocol used to receive this descriptor. This
- protocol must be connected via a transport providing
- L{IUNIXTransport<twisted.internet.interfaces.IUNIXTransport>}.
- @type proto: L{BinaryBoxProtocol}
-
- @return: The file descriptor represented by C{inString}.
- @rtype: C{int}
- """
- return proto._getDescriptor(int(inString))
-
-
- def toStringProto(self, inObject, proto):
- """
- Send C{inObject}, an integer file descriptor, over C{proto}'s connection
- and return a unique identifier which will allow the receiver to
- associate the file descriptor with this argument.
-
- @param inObject: A file descriptor to duplicate over an AMP connection
- as the value for this argument.
- @type inObject: C{int}
-
- @param proto: The protocol which will be used to send this descriptor.
- This protocol must be connected via a transport providing
- L{IUNIXTransport<twisted.internet.interfaces.IUNIXTransport>}.
-
- @return: A byte string which can be used by the receiver to reconstruct
- the file descriptor.
-        @rtype: C{str}
- """
- identifier = proto._sendFileDescriptor(inObject)
- outString = Integer.toStringProto(self, identifier, proto)
- return outString
-
-
-
-class Command:
- """
- Subclass me to specify an AMP Command.
-
- @cvar arguments: A list of 2-tuples of (name, Argument-subclass-instance),
- specifying the names and values of the parameters which are required for
- this command.
-
- @cvar response: A list like L{arguments}, but instead used for the return
- value.
-
- @cvar errors: A mapping of subclasses of L{Exception} to wire-protocol tags
- for errors represented as L{str}s. Responders which raise keys from this
- dictionary will have the error translated to the corresponding tag on the
- wire. Invokers which receive Deferreds from invoking this command with
- L{AMP.callRemote} will potentially receive Failures with keys from this
- mapping as their value. This mapping is inherited; if you declare a
- command which handles C{FooError} as 'FOO_ERROR', then subclass it and
- specify C{BarError} as 'BAR_ERROR', responders to the subclass may raise
- either C{FooError} or C{BarError}, and invokers must be able to deal with
- either of those exceptions.
-
- @cvar fatalErrors: like 'errors', but errors in this list will always
- terminate the connection, despite being of a recognizable error type.
-
- @cvar commandType: The type of Box used to issue commands; useful only for
- protocol-modifying behavior like startTLS or protocol switching. Defaults
- to a plain vanilla L{Box}.
-
- @cvar responseType: The type of Box used to respond to this command; only
- useful for protocol-modifying behavior like startTLS or protocol switching.
- Defaults to a plain vanilla L{Box}.
-
- @ivar requiresAnswer: a boolean; defaults to True. Set it to False on your
- subclass if you want callRemote to return None. Note: this is a hint only
- to the client side of the protocol. The return-type of a command responder
- method must always be a dictionary adhering to the contract specified by
- L{response}, because clients are always free to request a response if they
- want one.
- """
-
- class __metaclass__(type):
- """
- Metaclass hack to establish reverse-mappings for 'errors' and
- 'fatalErrors' as class vars.
- """
- def __new__(cls, name, bases, attrs):
- reverseErrors = attrs['reverseErrors'] = {}
- er = attrs['allErrors'] = {}
- if 'commandName' not in attrs:
- attrs['commandName'] = name
- newtype = type.__new__(cls, name, bases, attrs)
- errors = {}
- fatalErrors = {}
- accumulateClassDict(newtype, 'errors', errors)
- accumulateClassDict(newtype, 'fatalErrors', fatalErrors)
- for v, k in errors.iteritems():
- reverseErrors[k] = v
- er[v] = k
- for v, k in fatalErrors.iteritems():
- reverseErrors[k] = v
- er[v] = k
- return newtype
-
- arguments = []
- response = []
- extra = []
- errors = {}
- fatalErrors = {}
-
- commandType = Box
- responseType = Box
-
- requiresAnswer = True
-
-
- def __init__(self, **kw):
- """
- Create an instance of this command with specified values for its
- parameters.
-
- @param kw: a dict containing an appropriate value for each name
- specified in the L{arguments} attribute of my class.
-
- @raise InvalidSignature: if you forgot any required arguments.
- """
- self.structured = kw
- givenArgs = kw.keys()
- forgotten = []
- for name, arg in self.arguments:
- pythonName = _wireNameToPythonIdentifier(name)
- if pythonName not in givenArgs and not arg.optional:
- forgotten.append(pythonName)
- if forgotten:
- raise InvalidSignature("forgot %s for %s" % (
- ', '.join(forgotten), self.commandName))
- forgotten = []
-
-
- def makeResponse(cls, objects, proto):
- """
- Serialize a mapping of arguments using this L{Command}'s
- response schema.
-
- @param objects: a dict with keys matching the names specified in
- self.response, having values of the types that the Argument objects in
- self.response can format.
-
- @param proto: an L{AMP}.
-
- @return: an L{AmpBox}.
- """
- try:
- responseType = cls.responseType()
- except:
- return fail()
- return _objectsToStrings(objects, cls.response, responseType, proto)
- makeResponse = classmethod(makeResponse)
-
-
- def makeArguments(cls, objects, proto):
- """
- Serialize a mapping of arguments using this L{Command}'s
- argument schema.
-
- @param objects: a dict with keys similar to the names specified in
- self.arguments, having values of the types that the Argument objects in
- self.arguments can parse.
-
- @param proto: an L{AMP}.
-
- @return: An instance of this L{Command}'s C{commandType}.
- """
- allowedNames = set()
- for (argName, ignored) in cls.arguments:
- allowedNames.add(_wireNameToPythonIdentifier(argName))
-
- for intendedArg in objects:
- if intendedArg not in allowedNames:
- raise InvalidSignature(
- "%s is not a valid argument" % (intendedArg,))
- return _objectsToStrings(objects, cls.arguments, cls.commandType(),
- proto)
- makeArguments = classmethod(makeArguments)
-
-
- def parseResponse(cls, box, protocol):
- """
- Parse a mapping of serialized arguments using this
- L{Command}'s response schema.
-
- @param box: A mapping of response-argument names to the
- serialized forms of those arguments.
- @param protocol: The L{AMP} protocol.
-
- @return: A mapping of response-argument names to the parsed
- forms.
- """
- return _stringsToObjects(box, cls.response, protocol)
- parseResponse = classmethod(parseResponse)
-
-
- def parseArguments(cls, box, protocol):
- """
- Parse a mapping of serialized arguments using this
- L{Command}'s argument schema.
-
-        @param box: A mapping of argument names to the serialized forms
- of those arguments.
- @param protocol: The L{AMP} protocol.
-
- @return: A mapping of argument names to the parsed forms.
- """
- return _stringsToObjects(box, cls.arguments, protocol)
- parseArguments = classmethod(parseArguments)
-
-
- def responder(cls, methodfunc):
- """
- Declare a method to be a responder for a particular command.
-
- This is a decorator.
-
- Use like so::
-
- class MyCommand(Command):
- arguments = [('a', ...), ('b', ...)]
-
- class MyProto(AMP):
- def myFunMethod(self, a, b):
- ...
- MyCommand.responder(myFunMethod)
-
- Notes: Although decorator syntax is not used within Twisted, this
- function returns its argument and is therefore safe to use with
- decorator syntax.
-
- This is not thread safe. Don't declare AMP subclasses in other
- threads. Don't declare responders outside the scope of AMP subclasses;
- the behavior is undefined.
-
- @param methodfunc: A function which will later become a method, which
- has a keyword signature compatible with this command's L{argument} list
- and returns a dictionary with a set of keys compatible with this
- command's L{response} list.
-
- @return: the methodfunc parameter.
- """
- CommandLocator._currentClassCommands.append((cls, methodfunc))
- return methodfunc
- responder = classmethod(responder)
-
-
- # Our only instance method
- def _doCommand(self, proto):
- """
- Encode and send this Command to the given protocol.
-
- @param proto: an AMP, representing the connection to send to.
-
- @return: a Deferred which will fire or error appropriately when the
- other side responds to the command (or error if the connection is lost
- before it is responded to).
- """
-
- def _massageError(error):
- error.trap(RemoteAmpError)
- rje = error.value
- errorType = self.reverseErrors.get(rje.errorCode,
- UnknownRemoteError)
- return Failure(errorType(rje.description))
-
- d = proto._sendBoxCommand(self.commandName,
- self.makeArguments(self.structured, proto),
- self.requiresAnswer)
-
- if self.requiresAnswer:
- d.addCallback(self.parseResponse, proto)
- d.addErrback(_massageError)
-
- return d
-
-
-
-class _NoCertificate:
- """
- This is for peers which don't want to use a local certificate. Used by
- AMP because AMP's internal language is all about certificates and this
- duck-types in the appropriate place; this API isn't really stable though,
- so it's not exposed anywhere public.
-
- For clients, it will use ephemeral DH keys, or whatever the default is for
- certificate-less clients in OpenSSL. For servers, it will generate a
- temporary self-signed certificate with garbage values in the DN and use
- that.
- """
-
- def __init__(self, client):
- """
- Create a _NoCertificate which either is or isn't for the client side of
- the connection.
-
- @param client: True if we are a client and should truly have no
- certificate and be anonymous, False if we are a server and actually
- have to generate a temporary certificate.
-
- @type client: bool
- """
- self.client = client
-
-
- def options(self, *authorities):
- """
- Behaves like L{twisted.internet.ssl.PrivateCertificate.options}().
- """
- if not self.client:
- # do some crud with sslverify to generate a temporary self-signed
- # certificate. This is SLOOOWWWWW so it is only in the absolute
- # worst, most naive case.
-
- # We have to do this because OpenSSL will not let both the server
- # and client be anonymous.
- sharedDN = DN(CN='TEMPORARY CERTIFICATE')
- key = KeyPair.generate()
- cr = key.certificateRequest(sharedDN)
- sscrd = key.signCertificateRequest(sharedDN, cr, lambda dn: True, 1)
- cert = key.newCertificate(sscrd)
- return cert.options(*authorities)
- options = dict()
- if authorities:
- options.update(dict(verify=True,
- requireCertificate=True,
- caCerts=[auth.original for auth in authorities]))
- occo = CertificateOptions(**options)
- return occo
-
-
-
-class _TLSBox(AmpBox):
- """
- I am an AmpBox that, upon being sent, initiates a TLS connection.
- """
- __slots__ = []
-
- def __init__(self):
- if ssl is None:
- raise RemoteAmpError("TLS_ERROR", "TLS not available")
- AmpBox.__init__(self)
-
-
- def _keyprop(k, default):
- return property(lambda self: self.get(k, default))
-
-
- # These properties are described in startTLS
- certificate = _keyprop('tls_localCertificate', _NoCertificate(False))
- verify = _keyprop('tls_verifyAuthorities', None)
-
- def _sendTo(self, proto):
- """
- Send my encoded value to the protocol, then initiate TLS.
- """
- ab = AmpBox(self)
- for k in ['tls_localCertificate',
- 'tls_verifyAuthorities']:
- ab.pop(k, None)
- ab._sendTo(proto)
- proto._startTLS(self.certificate, self.verify)
-
-
-
-class _LocalArgument(String):
- """
- Local arguments are never actually relayed across the wire. This is just a
- shim so that StartTLS can pretend to have some arguments: if arguments
- acquire documentation properties, replace this with something nicer later.
- """
-
- def fromBox(self, name, strings, objects, proto):
- pass
-
-
-
-class StartTLS(Command):
- """
- Use, or subclass, me to implement a command that starts TLS.
-
- Callers of StartTLS may pass several special arguments, which affect the
- TLS negotiation:
-
- - tls_localCertificate: This is a
- twisted.internet.ssl.PrivateCertificate which will be used to secure
- the side of the connection it is returned on.
-
- - tls_verifyAuthorities: This is a list of
- twisted.internet.ssl.Certificate objects that will be used as the
- certificate authorities to verify our peer's certificate.
-
- Each of those special parameters may also be present as a key in the
- response dictionary.
- """
-
- arguments = [("tls_localCertificate", _LocalArgument(optional=True)),
- ("tls_verifyAuthorities", _LocalArgument(optional=True))]
-
- response = [("tls_localCertificate", _LocalArgument(optional=True)),
- ("tls_verifyAuthorities", _LocalArgument(optional=True))]
-
- responseType = _TLSBox
-
- def __init__(self, **kw):
- """
- Create a StartTLS command. (This is private. Use AMP.callRemote.)
-
- @param tls_localCertificate: the PrivateCertificate object to use to
- secure the connection. If it's None, or unspecified, an ephemeral DH
- key is used instead.
-
- @param tls_verifyAuthorities: a list of Certificate objects which
- represent root certificates to verify our peer with.
- """
- if ssl is None:
- raise RuntimeError("TLS not available.")
- self.certificate = kw.pop('tls_localCertificate', _NoCertificate(True))
- self.authorities = kw.pop('tls_verifyAuthorities', None)
- Command.__init__(self, **kw)
-
-
- def _doCommand(self, proto):
- """
- When a StartTLS command is sent, prepare to start TLS, but don't actually
- do it; wait for the acknowledgement, then initiate the TLS handshake.
- """
- d = Command._doCommand(self, proto)
- proto._prepareTLS(self.certificate, self.authorities)
- # XXX before we get back to user code we are going to start TLS...
- def actuallystart(response):
- proto._startTLS(self.certificate, self.authorities)
- return response
- d.addCallback(actuallystart)
- return d
-
-
-
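As a sketch of how the special arguments documented on StartTLS are usually supplied: the upgrade helper and the PEM strings below are hypothetical, but PrivateCertificate, Certificate and callRemote are the real pieces involved.

from twisted.internet import ssl
from twisted.protocols import amp

def upgrade(proto, certPem, caPem):
    # Ask the peer to negotiate TLS over the existing AMP connection,
    # presenting our certificate and verifying theirs against one CA.
    cert = ssl.PrivateCertificate.loadPEM(certPem)
    authority = ssl.Certificate.loadPEM(caPem)
    return proto.callRemote(amp.StartTLS,
                            tls_localCertificate=cert,
                            tls_verifyAuthorities=[authority])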
-class ProtocolSwitchCommand(Command):
- """
- Use this command to switch from something Amp-derived to a different
- protocol mid-connection. This can be useful to use amp as the
- connection-startup negotiation phase. Since TLS is a different layer
- entirely, you can use Amp to negotiate the security parameters of your
- connection, then switch to a different protocol, and the connection will
- remain secured.
- """
-
- def __init__(self, _protoToSwitchToFactory, **kw):
- """
- Create a ProtocolSwitchCommand.
-
- @param _protoToSwitchToFactory: a ProtocolFactory which will generate
- the Protocol to switch to.
-
- @param kw: Keyword arguments, encoded and handled normally as
- L{Command} would.
- """
-
- self.protoToSwitchToFactory = _protoToSwitchToFactory
- super(ProtocolSwitchCommand, self).__init__(**kw)
-
-
- def makeResponse(cls, innerProto, proto):
- return _SwitchBox(innerProto)
- makeResponse = classmethod(makeResponse)
-
-
- def _doCommand(self, proto):
- """
- When we emit a ProtocolSwitchCommand, lock the protocol, but don't actually
- switch to the new protocol unless an acknowledgement is received. If
- an error is received, switch back.
- """
- d = super(ProtocolSwitchCommand, self)._doCommand(proto)
- proto._lockForSwitch()
- def switchNow(ign):
- innerProto = self.protoToSwitchToFactory.buildProtocol(
- proto.transport.getPeer())
- proto._switchTo(innerProto, self.protoToSwitchToFactory)
- return ign
- def handle(ign):
- proto._unlockFromSwitch()
- self.protoToSwitchToFactory.clientConnectionFailed(
- None, Failure(CONNECTION_LOST))
- return ign
- return d.addCallbacks(switchNow, handle)
-
-
-
-class _DescriptorExchanger(object):
- """
- L{_DescriptorExchanger} is a mixin for L{BinaryBoxProtocol} which adds
- support for receiving file descriptors, a feature offered by
- L{IUNIXTransport<twisted.internet.interfaces.IUNIXTransport>}.
-
- @ivar _descriptors: Temporary storage for all file descriptors received.
- Values in this dictionary are the file descriptors (as integers). Keys
- in this dictionary are ordinals giving the order in which each
- descriptor was received. The ordering information is used to allow
- L{Descriptor} to determine which is the correct descriptor for any
- particular usage of that argument type.
- @type _descriptors: C{dict}
-
- @ivar _sendingDescriptorCounter: A no-argument callable which returns the
- ordinals, starting from 0. This is used to construct values for
- C{_sendFileDescriptor}.
-
- @ivar _receivingDescriptorCounter: A no-argument callable which returns the
- ordinals, starting from 0. This is used to construct values for
- C{fileDescriptorReceived}.
- """
- implements(IFileDescriptorReceiver)
-
- def __init__(self):
- self._descriptors = {}
- self._getDescriptor = self._descriptors.pop
- self._sendingDescriptorCounter = count().next
- self._receivingDescriptorCounter = count().next
-
-
- def _sendFileDescriptor(self, descriptor):
- """
- Assign and return the next ordinal to the given descriptor after sending
- the descriptor over this protocol's transport.
- """
- self.transport.sendFileDescriptor(descriptor)
- return self._sendingDescriptorCounter()
-
-
- def fileDescriptorReceived(self, descriptor):
- """
- Collect received file descriptors to be claimed later by L{Descriptor}.
-
- @param descriptor: The received file descriptor.
- @type descriptor: C{int}
- """
- self._descriptors[self._receivingDescriptorCounter()] = descriptor
-
-
-
-class BinaryBoxProtocol(StatefulStringProtocol, Int16StringReceiver,
- _DescriptorExchanger):
- """
- A protocol for receiving L{AmpBox}es - key/value pairs - via length-prefixed
- strings. A box is composed of:
-
- - any number of key-value pairs, described by:
- - a 2-byte network-endian packed key length (of which the first
- byte must be null, and the second must be non-null: i.e. the
- value of the length must be 1-255)
- - a key, comprised of that many bytes
- - a 2-byte network-endian unsigned value length (up to the maximum
- of 65535)
- - a value, comprised of that many bytes
- - 2 null bytes
-
- In other words, an even number of strings prefixed with packed unsigned
- 16-bit integers, and then a 0-length string to indicate the end of the box.
-
- This protocol also implements 2 extra private bits of functionality related
- to the byte boundaries between messages; it can start TLS between two given
- boxes or switch to an entirely different protocol. However, due to some
- tricky elements of the implementation, the public interface to this
- functionality is L{ProtocolSwitchCommand} and L{StartTLS}.
-
- @ivar _keyLengthLimitExceeded: A flag which is only true when the
- connection is being closed because a key length prefix which was longer
- than allowed by the protocol was received.
-
- @ivar boxReceiver: an L{IBoxReceiver} provider, whose L{ampBoxReceived}
- method will be invoked for each L{AmpBox} that is received.
- """
-
- implements(IBoxSender)
-
- _justStartedTLS = False
- _startingTLSBuffer = None
- _locked = False
- _currentKey = None
- _currentBox = None
-
- _keyLengthLimitExceeded = False
-
- hostCertificate = None
- noPeerCertificate = False # for tests
- innerProtocol = None
- innerProtocolClientFactory = None
-
- def __init__(self, boxReceiver):
- _DescriptorExchanger.__init__(self)
- self.boxReceiver = boxReceiver
-
-
- def _switchTo(self, newProto, clientFactory=None):
- """
- Switch this BinaryBoxProtocol's transport to a new protocol. You need
- to do this 'simultaneously' on both ends of a connection; the easiest
- way to do this is to use a subclass of ProtocolSwitchCommand.
-
- @param newProto: the new protocol instance to switch to.
-
- @param clientFactory: the ClientFactory to send the
- L{clientConnectionLost} notification to.
- """
- # All the data that Int16Receiver has not yet dealt with belongs to our
- # new protocol: luckily it's keeping that in a handy (although
- # ostensibly internal) variable for us:
- newProtoData = self.recvd
- # We're quite possibly in the middle of a 'dataReceived' loop in
- # Int16StringReceiver: let's make sure that the next iteration, the
- # loop will break and not attempt to look at something that isn't a
- # length prefix.
- self.recvd = ''
- # Finally, do the actual work of setting up the protocol and delivering
- # its first chunk of data, if one is available.
- self.innerProtocol = newProto
- self.innerProtocolClientFactory = clientFactory
- newProto.makeConnection(self.transport)
- if newProtoData:
- newProto.dataReceived(newProtoData)
-
-
- def sendBox(self, box):
- """
- Send an AmpBox to my peer.
-
- Note: transport.write is never called outside of this method.
-
- @param box: an AmpBox.
-
- @raise ProtocolSwitched: if the protocol has previously been switched.
-
- @raise ConnectionLost: if the connection has previously been lost.
- """
- if self._locked:
- raise ProtocolSwitched(
- "This connection has switched: no AMP traffic allowed.")
- if self.transport is None:
- raise ConnectionLost()
- if self._startingTLSBuffer is not None:
- self._startingTLSBuffer.append(box)
- else:
- self.transport.write(box.serialize())
-
-
- def makeConnection(self, transport):
- """
- Notify L{boxReceiver} that it is about to receive boxes from this
- protocol by invoking L{startReceivingBoxes}.
- """
- self.transport = transport
- self.boxReceiver.startReceivingBoxes(self)
- self.connectionMade()
-
-
- def dataReceived(self, data):
- """
- Either parse incoming data as L{AmpBox}es or relay it to our nested
- protocol.
- """
- if self._justStartedTLS:
- self._justStartedTLS = False
- # If we already have an inner protocol, then we don't deliver data to
- # the protocol parser any more; we just hand it off.
- if self.innerProtocol is not None:
- self.innerProtocol.dataReceived(data)
- return
- return Int16StringReceiver.dataReceived(self, data)
-
-
- def connectionLost(self, reason):
- """
- The connection was lost; notify any nested protocol.
- """
- if self.innerProtocol is not None:
- self.innerProtocol.connectionLost(reason)
- if self.innerProtocolClientFactory is not None:
- self.innerProtocolClientFactory.clientConnectionLost(None, reason)
- if self._keyLengthLimitExceeded:
- failReason = Failure(TooLong(True, False, None, None))
- elif reason.check(ConnectionClosed) and self._justStartedTLS:
- # We just started TLS and haven't received any data. This means
- # the other connection didn't like our cert (although they may not
- # have told us why - later Twisted should make 'reason' into a TLS
- # error.)
- failReason = PeerVerifyError(
- "Peer rejected our certificate for an unknown reason.")
- else:
- failReason = reason
- self.boxReceiver.stopReceivingBoxes(failReason)
-
-
- # The longest key allowed
- _MAX_KEY_LENGTH = 255
-
- # The longest value allowed (this is somewhat redundant, as longer values
- # cannot be encoded - ah well).
- _MAX_VALUE_LENGTH = 65535
-
- # The first thing received is a key.
- MAX_LENGTH = _MAX_KEY_LENGTH
-
- def proto_init(self, string):
- """
- String received in the 'init' state.
- """
- self._currentBox = AmpBox()
- return self.proto_key(string)
-
-
- def proto_key(self, string):
- """
- String received in the 'key' state. If the key is empty, a complete
- box has been received.
- """
- if string:
- self._currentKey = string
- self.MAX_LENGTH = self._MAX_VALUE_LENGTH
- return 'value'
- else:
- self.boxReceiver.ampBoxReceived(self._currentBox)
- self._currentBox = None
- return 'init'
-
-
- def proto_value(self, string):
- """
- String received in the 'value' state.
- """
- self._currentBox[self._currentKey] = string
- self._currentKey = None
- self.MAX_LENGTH = self._MAX_KEY_LENGTH
- return 'key'
-
-
- def lengthLimitExceeded(self, length):
- """
- The key length limit was exceeded. Disconnect the transport and make
- sure a meaningful exception is reported.
- """
- self._keyLengthLimitExceeded = True
- self.transport.loseConnection()
-
-
- def _lockForSwitch(self):
- """
- Lock this binary protocol so that no further boxes may be sent. This
- is used when sending a request to switch underlying protocols. You
- probably want to subclass ProtocolSwitchCommand rather than calling
- this directly.
- """
- self._locked = True
-
-
- def _unlockFromSwitch(self):
- """
- Unlock this locked binary protocol so that further boxes may be sent
- again. This is used after an attempt to switch protocols has failed
- for some reason.
- """
- if self.innerProtocol is not None:
- raise ProtocolSwitched("Protocol already switched. Cannot unlock.")
- self._locked = False
-
-
- def _prepareTLS(self, certificate, verifyAuthorities):
- """
- Used by L{StartTLS} to put us into a state where boxes are not
- sent immediately but buffered until TLS has been negotiated. See
- L{sendBox}.
- """
- self._startingTLSBuffer = []
- if self.hostCertificate is not None:
- raise OnlyOneTLS(
- "Previously authenticated connection between %s and %s "
- "is trying to re-establish as %s" % (
- self.hostCertificate,
- self.peerCertificate,
- (certificate, verifyAuthorities)))
-
-
- def _startTLS(self, certificate, verifyAuthorities):
- """
- Used by L{_TLSBox} to initiate the TLS handshake.
-
- @param certificate: a L{twisted.internet.ssl.PrivateCertificate} for
- use locally.
-
- @param verifyAuthorities: L{twisted.internet.ssl.Certificate} instances
- representing certificate authorities which will verify our peer.
- """
- self.hostCertificate = certificate
- self._justStartedTLS = True
- if verifyAuthorities is None:
- verifyAuthorities = ()
- self.transport.startTLS(certificate.options(*verifyAuthorities))
- stlsb = self._startingTLSBuffer
- if stlsb is not None:
- self._startingTLSBuffer = None
- for box in stlsb:
- self.sendBox(box)
-
-
- def _getPeerCertificate(self):
- if self.noPeerCertificate:
- return None
- return Certificate.peerFromTransport(self.transport)
- peerCertificate = property(_getPeerCertificate)
-
-
- def unhandledError(self, failure):
- """
- The buck stops here. This error was completely unhandled, time to
- terminate the connection.
- """
- log.err(
- failure,
- "Amp server or network failure unhandled by client application. "
- "Dropping connection! To avoid, add errbacks to ALL remote "
- "commands!")
- if self.transport is not None:
- self.transport.loseConnection()
-
-
- def _defaultStartTLSResponder(self):
- """
- The default TLS responder doesn't specify any certificate or anything.
-
- From a security perspective, it's little better than a plain-text
- connection - but it is still a *bit* better, so it's included for
- convenience.
-
- You probably want to override this by providing your own StartTLS.responder.
- """
- return {}
- StartTLS.responder(_defaultStartTLSResponder)
-
-
-
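The box framing spelled out in BinaryBoxProtocol's docstring can be reproduced by hand; encodeExample is a throwaway helper written only for this sketch.

from struct import pack

def encodeExample(pairs):
    # Each key and value is preceded by a 16-bit network-order length;
    # an empty key (two NUL bytes) terminates the box.
    out = []
    for key, value in pairs:
        out.append(pack('!H', len(key)) + key)
        out.append(pack('!H', len(value)) + value)
    out.append('\x00\x00')
    return ''.join(out)

encodeExample([('hello', 'world')])
# -> '\x00\x05hello\x00\x05world\x00\x00', the same framing that
# AmpBox.serialize() produces for a one-key box.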
-class AMP(BinaryBoxProtocol, BoxDispatcher,
- CommandLocator, SimpleStringLocator):
- """
- This protocol is an AMP connection. See the module docstring for protocol
- details.
- """
-
- _ampInitialized = False
-
- def __init__(self, boxReceiver=None, locator=None):
- # For backwards compatibility. When AMP did not separate parsing logic
- # (L{BinaryBoxProtocol}), request-response logic (L{BoxDispatcher}) and
- # command routing (L{CommandLocator}), it did not have a constructor.
- # Now it does, so old subclasses might have defined their own that did
- # not upcall. If this flag isn't set, we'll call the constructor in
- # makeConnection before anything actually happens.
- self._ampInitialized = True
- if boxReceiver is None:
- boxReceiver = self
- if locator is None:
- locator = self
- BoxDispatcher.__init__(self, locator)
- BinaryBoxProtocol.__init__(self, boxReceiver)
-
-
- def locateResponder(self, name):
- """
- Unify the implementations of L{CommandLocator} and
- L{SimpleStringLocator} to perform both kinds of dispatch, preferring
- L{CommandLocator}.
- """
- firstResponder = CommandLocator.locateResponder(self, name)
- if firstResponder is not None:
- return firstResponder
- secondResponder = SimpleStringLocator.locateResponder(self, name)
- return secondResponder
-
-
- def __repr__(self):
- """
- A verbose string representation which gives us information about this
- AMP connection.
- """
- if self.innerProtocol is not None:
- innerRepr = ' inner %r' % (self.innerProtocol,)
- else:
- innerRepr = ''
- return '<%s%s at 0x%x>' % (
- self.__class__.__name__, innerRepr, unsignedID(self))
-
-
- def makeConnection(self, transport):
- """
- Emit a helpful log message when the connection is made.
- """
- if not self._ampInitialized:
- # See comment in the constructor re: backward compatibility. I
- # should probably emit a deprecation warning here.
- AMP.__init__(self)
- # Save these so we can emit a similar log message in L{connectionLost}.
- self._transportPeer = transport.getPeer()
- self._transportHost = transport.getHost()
- log.msg("%s connection established (HOST:%s PEER:%s)" % (
- self.__class__.__name__,
- self._transportHost,
- self._transportPeer))
- BinaryBoxProtocol.makeConnection(self, transport)
-
-
- def connectionLost(self, reason):
- """
- Emit a helpful log message when the connection is lost.
- """
- log.msg("%s connection lost (HOST:%s PEER:%s)" %
- (self.__class__.__name__,
- self._transportHost,
- self._transportPeer))
- BinaryBoxProtocol.connectionLost(self, reason)
- self.transport = None
-
-
-
-class _ParserHelper:
- """
- A box receiver which records all boxes received.
- """
- def __init__(self):
- self.boxes = []
-
-
- def getPeer(self):
- return 'string'
-
-
- def getHost(self):
- return 'string'
-
- disconnecting = False
-
-
- def startReceivingBoxes(self, sender):
- """
- No initialization is required.
- """
-
-
- def ampBoxReceived(self, box):
- self.boxes.append(box)
-
-
- # Synchronous helpers
- def parse(cls, fileObj):
- """
- Parse some amp data stored in a file.
-
- @param fileObj: a file-like object.
-
- @return: a list of AmpBoxes encoded in the given file.
- """
- parserHelper = cls()
- bbp = BinaryBoxProtocol(boxReceiver=parserHelper)
- bbp.makeConnection(parserHelper)
- bbp.dataReceived(fileObj.read())
- return parserHelper.boxes
- parse = classmethod(parse)
-
-
- def parseString(cls, data):
- """
- Parse some amp data stored in a string.
-
- @param data: a str holding some amp-encoded data.
-
- @return: a list of AmpBoxes encoded in the given string.
- """
- return cls.parse(StringIO(data))
- parseString = classmethod(parseString)
-
-
-
-parse = _ParserHelper.parse
-parseString = _ParserHelper.parseString
-
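A quick sketch of the module-level helpers just defined: parseString() feeds the bytes to a throwaway BinaryBoxProtocol and returns the boxes it parsed.

from twisted.protocols import amp

boxes = amp.parseString('\x00\x05hello\x00\x05world\x00\x00')
# boxes is a list containing one AmpBox; boxes[0]['hello'] == 'world'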
-def _stringsToObjects(strings, arglist, proto):
- """
- Convert an AmpBox to a dictionary of python objects, converting through a
- given arglist.
-
- @param strings: an AmpBox (or dict of strings)
-
- @param arglist: a list of 2-tuples of strings and Argument objects, as
- described in L{Command.arguments}.
-
- @param proto: an L{AMP} instance.
-
- @return: the converted dictionary mapping names to argument objects.
- """
- objects = {}
- myStrings = strings.copy()
- for argname, argparser in arglist:
- argparser.fromBox(argname, myStrings, objects, proto)
- return objects
-
-
-
-def _objectsToStrings(objects, arglist, strings, proto):
- """
- Convert a dictionary of python objects to an AmpBox, converting through a
- given arglist.
-
- @param objects: a dict mapping names to python objects
-
- @param arglist: a list of 2-tuples of strings and Argument objects, as
- described in L{Command.arguments}.
-
- @param strings: [OUT PARAMETER] An object providing the L{dict}
- interface which will be populated with serialized data.
-
- @param proto: an L{AMP} instance.
-
- @return: The converted dictionary mapping names to encoded argument
- strings (identical to C{strings}).
- """
- myObjects = objects.copy()
- for argname, argparser in arglist:
- argparser.toBox(argname, strings, myObjects, proto)
- return strings
-
-
-
-class _FixedOffsetTZInfo(datetime.tzinfo):
- """
- Represents a fixed timezone offset (without daylight saving time).
-
- @ivar name: A C{str} giving the name of this timezone; the name just
- includes how much time this offset represents.
-
- @ivar offset: A C{datetime.timedelta} giving the amount of time this
- timezone is offset.
- """
-
- def __init__(self, sign, hours, minutes):
- self.name = '%s%02i:%02i' % (sign, hours, minutes)
- if sign == '-':
- hours = -hours
- minutes = -minutes
- elif sign != '+':
- raise ValueError('invalid sign for timezone %r' % (sign,))
- self.offset = datetime.timedelta(hours=hours, minutes=minutes)
-
-
- def utcoffset(self, dt):
- """
- Return this timezone's offset from UTC.
- """
- return self.offset
-
-
- def dst(self, dt):
- """
- Return a zero C{datetime.timedelta} for the daylight saving time offset,
- since there is never one.
- """
- return datetime.timedelta(0)
-
-
- def tzname(self, dt):
- """
- Return a string describing this timezone.
- """
- return self.name
-
-
-
-utc = _FixedOffsetTZInfo('+', 0, 0)
-
-
-
-class Decimal(Argument):
- """
- Encodes C{decimal.Decimal} instances.
-
- There are several ways in which a decimal value might be encoded.
-
- Special values are encoded as special strings::
-
- - Positive infinity is encoded as C{"Infinity"}
- - Negative infinity is encoded as C{"-Infinity"}
- - Quiet not-a-number is encoded as either C{"NaN"} or C{"-NaN"}
- - Signalling not-a-number is encoded as either C{"sNaN"} or C{"-sNaN"}
-
- Normal values are encoded using the base ten string representation, using
- engineering notation to indicate magnitude without precision, and "normal"
- digits to indicate precision. For example::
-
- - C{"1"} represents the value I{1} with precision to one place.
- - C{"-1"} represents the value I{-1} with precision to one place.
- - C{"1.0"} represents the value I{1} with precision to two places.
- - C{"10"} represents the value I{10} with precision to two places.
- - C{"1E+2"} represents the value I{10} with precision to one place.
- - C{"1E-1"} represents the value I{0.1} with precision to one place.
- - C{"1.5E+2"} represents the value I{15} with precision to two places.
-
- U{http://speleotrove.com/decimal/} should be considered the authoritative
- specification for the format.
- """
- fromString = decimal.Decimal
-
- def toString(self, inObject):
- """
- Serialize a C{decimal.Decimal} instance to the specified wire format.
- """
- if isinstance(inObject, decimal.Decimal):
- # Hopefully decimal.Decimal.__str__ actually does what we want.
- return str(inObject)
- raise ValueError(
- "amp.Decimal can only encode instances of decimal.Decimal")
-
-
-
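A short sketch of the Decimal argument's round trip, using values from the encoding notes above.

import decimal
from twisted.protocols import amp

arg = amp.Decimal()
arg.toString(decimal.Decimal('1.5E+2'))    # -> '1.5E+2'
arg.fromString('-Infinity')                # -> Decimal('-Infinity')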
-class DateTime(Argument):
- """
- Encodes C{datetime.datetime} instances.
-
- Wire format: '%04i-%02i-%02iT%02i:%02i:%02i.%06i%s%02i:%02i'. Fields in
- order are: year, month, day, hour, minute, second, microsecond, timezone
- direction (+ or -), timezone hour, timezone minute. Encoded string is
- always exactly 32 characters long. This format is compatible with ISO 8601,
- but that does not mean all ISO 8601 dates can be accepted.
-
- Also, note that the datetime module's notion of a "timezone" can be
- complex, but the wire format includes only a fixed offset, so the
- conversion is not lossless. A lossless transmission of a C{datetime} instance
- is not feasible since the receiving end would require a Python interpreter.
-
- @ivar _positions: A sequence of slices giving the positions of various
- interesting parts of the wire format.
- """
-
- _positions = [
- slice(0, 4), slice(5, 7), slice(8, 10), # year, month, day
- slice(11, 13), slice(14, 16), slice(17, 19), # hour, minute, second
- slice(20, 26), # microsecond
- # intentionally skip timezone direction, as it is not an integer
- slice(27, 29), slice(30, 32) # timezone hour, timezone minute
- ]
-
- def fromString(self, s):
- """
- Parse a string containing a date and time in the wire format into a
- C{datetime.datetime} instance.
- """
- if len(s) != 32:
- raise ValueError('invalid date format %r' % (s,))
-
- values = [int(s[p]) for p in self._positions]
- sign = s[26]
- timezone = _FixedOffsetTZInfo(sign, *values[7:])
- values[7:] = [timezone]
- return datetime.datetime(*values)
-
-
- def toString(self, i):
- """
- Serialize a C{datetime.datetime} instance to a string in the specified
- wire format.
- """
- offset = i.utcoffset()
- if offset is None:
- raise ValueError(
- 'amp.DateTime cannot serialize naive datetime instances. '
- 'You may find amp.utc useful.')
-
- minutesOffset = (offset.days * 86400 + offset.seconds) // 60
-
- if minutesOffset > 0:
- sign = '+'
- else:
- sign = '-'
-
- # strftime has no way to format the microseconds, or put a ':' in the
- # timezone. Surprise!
-
- return '%04i-%02i-%02iT%02i:%02i:%02i.%06i%s%02i:%02i' % (
- i.year,
- i.month,
- i.day,
- i.hour,
- i.minute,
- i.second,
- i.microsecond,
- sign,
- abs(minutesOffset) // 60,
- abs(minutesOffset) % 60)
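And a sketch of the DateTime argument using the amp.utc timezone defined above; the output string follows directly from the format and sign logic in toString().

import datetime
from twisted.protocols import amp

arg = amp.DateTime()
wire = arg.toString(datetime.datetime(2012, 1, 1, 12, 30, 0, 1, amp.utc))
# wire == '2012-01-01T12:30:00.000001-00:00' (32 characters; a zero offset
# takes the '-' branch of the sign check above).
arg.fromString(wire).minute    # -> 30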
diff --git a/lib/python2.7/site-packages/Twisted-12.2.0-py2.7-linux-x86_64.egg/twisted/protocols/basic.py b/lib/python2.7/site-packages/Twisted-12.2.0-py2.7-linux-x86_64.egg/twisted/protocols/basic.py
deleted file mode 100755
index 7c4c9403..00000000
--- a/lib/python2.7/site-packages/Twisted-12.2.0-py2.7-linux-x86_64.egg/twisted/protocols/basic.py
+++ /dev/null
@@ -1,939 +0,0 @@
-# -*- test-case-name: twisted.test.test_protocols -*-
-# Copyright (c) Twisted Matrix Laboratories.
-# See LICENSE for details.
-
-
-"""
-Basic protocols, such as line-oriented, netstring, and int prefixed strings.
-
-Maintainer: Itamar Shtull-Trauring
-"""
-
-# System imports
-import re
-from struct import pack, unpack, calcsize
-import warnings
-import cStringIO
-import math
-
-from zope.interface import implements
-
-# Twisted imports
-from twisted.internet import protocol, defer, interfaces, error
-from twisted.python import log, deprecate, versions
-
-
-LENGTH, DATA, COMMA = range(3)
-NUMBER = re.compile('(\d*)(:?)')
-
-deprecatedSince = versions.Version("Twisted", 10, 2, 0)
-message = "NetstringReceiver parser state is private."
-for attr in ["LENGTH", "DATA", "COMMA", "NUMBER"]:
- deprecate.deprecatedModuleAttribute(
- deprecatedSince, message, __name__, attr)
-del deprecatedSince, message, attr
-
-DEBUG = 0
-
-class NetstringParseError(ValueError):
- """
- The incoming data is not in valid Netstring format.
- """
-
-
-
-class IncompleteNetstring(Exception):
- """
- Not enough data to complete a netstring.
- """
-
-
-
-class NetstringReceiver(protocol.Protocol):
- """
- A protocol that sends and receives netstrings.
-
- See U{http://cr.yp.to/proto/netstrings.txt} for the specification of
- netstrings. Every netstring starts with digits that specify the length
- of the data. This length specification is separated from the data by
- a colon. The data is terminated with a comma.
-
- Override L{stringReceived} to handle received netstrings. This
- method is called with the netstring payload as a single argument
- whenever a complete netstring is received.
-
- Security features:
- 1. Messages are limited in size, useful if you don't want
- someone sending you a 500MB netstring (change C{self.MAX_LENGTH}
- to the maximum length you wish to accept).
- 2. The connection is lost if an illegal message is received.
-
- @ivar MAX_LENGTH: Defines the maximum length of netstrings that can be
- received.
- @type MAX_LENGTH: C{int}
-
- @ivar _LENGTH: A pattern describing all strings that contain a netstring
- length specification. Examples for length specifications are '0:',
- '12:', and '179:'. '007:' is not a valid length specification, since
- leading zeros are not allowed.
- @type _LENGTH: C{re.Match}
-
- @ivar _LENGTH_PREFIX: A pattern describing all strings that contain
- the first part of a netstring length specification (without the
- trailing comma). Examples are '0', '12', and '179'. '007' does not
- start a netstring length specification, since leading zeros are
- not allowed.
- @type _LENGTH_PREFIX: C{re.Match}
-
- @ivar _PARSING_LENGTH: Indicates that the C{NetstringReceiver} is in
- the state of parsing the length portion of a netstring.
- @type _PARSING_LENGTH: C{int}
-
- @ivar _PARSING_PAYLOAD: Indicates that the C{NetstringReceiver} is in
- the state of parsing the payload portion (data and trailing comma)
- of a netstring.
- @type _PARSING_PAYLOAD: C{int}
-
- @ivar brokenPeer: Indicates if the connection is still functional
- @type brokenPeer: C{int}
-
- @ivar _state: Indicates if the protocol is consuming the length portion
- (C{PARSING_LENGTH}) or the payload (C{PARSING_PAYLOAD}) of a netstring
- @type _state: C{int}
-
- @ivar _remainingData: Holds the chunk of data that has not yet been consumed
- @type _remainingData: C{string}
-
- @ivar _payload: Holds the payload portion of a netstring including the
- trailing comma
- @type _payload: C{cStringIO.StringIO}
-
- @ivar _expectedPayloadSize: Holds the payload size plus one for the trailing
- comma.
- @type _expectedPayloadSize: C{int}
- """
- MAX_LENGTH = 99999
- _LENGTH = re.compile('(0|[1-9]\d*)(:)')
-
- _LENGTH_PREFIX = re.compile('(0|[1-9]\d*)$')
-
- # Some error information for NetstringParseError instances.
- _MISSING_LENGTH = ("The received netstring does not start with a "
- "length specification.")
- _OVERFLOW = ("The length specification of the received netstring "
- "cannot be represented in Python - it causes an "
- "OverflowError!")
- _TOO_LONG = ("The received netstring is longer than the maximum %s "
- "specified by self.MAX_LENGTH")
- _MISSING_COMMA = "The received netstring is not terminated by a comma."
- _DATA_SUPPORT_DEPRECATED = ("Data passed to sendString() must be a string. "
- "Non-string support is deprecated since "
- "Twisted 10.0")
-
- # The following constants are used for determining if the NetstringReceiver
- # is parsing the length portion of a netstring, or the payload.
- _PARSING_LENGTH, _PARSING_PAYLOAD = range(2)
-
- def makeConnection(self, transport):
- """
- Initializes the protocol.
- """
- protocol.Protocol.makeConnection(self, transport)
- self._remainingData = ""
- self._currentPayloadSize = 0
- self._payload = cStringIO.StringIO()
- self._state = self._PARSING_LENGTH
- self._expectedPayloadSize = 0
- self.brokenPeer = 0
-
-
- def sendString(self, string):
- """
- Sends a netstring.
-
- Wraps up C{string} by adding length information and a
- trailing comma; writes the result to the transport.
-
- @param string: The string to send. The necessary framing (length
- prefix, etc) will be added.
- @type string: C{str}
- """
- if not isinstance(string, str):
- warnings.warn(self._DATA_SUPPORT_DEPRECATED, DeprecationWarning, 2)
- string = str(string)
- self.transport.write('%d:%s,' % (len(string), string))
-
-
- def dataReceived(self, data):
- """
- Receives some characters of a netstring.
-
- Whenever a complete netstring is received, this method extracts
- its payload and calls L{stringReceived} to process it.
-
- @param data: A chunk of data representing a (possibly partial)
- netstring
- @type data: C{str}
- """
- self._remainingData += data
- while self._remainingData:
- try:
- self._consumeData()
- except IncompleteNetstring:
- break
- except NetstringParseError:
- self._handleParseError()
- break
-
-
- def stringReceived(self, string):
- """
- Override this for notification when each complete string is received.
-
- @param string: The complete string which was received with all
- framing (length prefix, etc) removed.
- @type string: C{str}
-
- @raise NotImplementedError: because the method has to be implemented
- by the child class.
- """
- raise NotImplementedError()
-
-
- def _maxLengthSize(self):
- """
- Calculate and return the string size of C{self.MAX_LENGTH}.
-
- @return: The size of the string representation for C{self.MAX_LENGTH}
- @rtype: C{float}
- """
- return math.ceil(math.log10(self.MAX_LENGTH)) + 1
-
-
- def _consumeData(self):
- """
- Consumes the content of C{self._remainingData}.
-
- @raise IncompleteNetstring: if C{self._remainingData} does not
- contain enough data to complete the current netstring.
- @raise NetstringParseError: if the received data do not
- form a valid netstring.
- """
- if self._state == self._PARSING_LENGTH:
- self._consumeLength()
- self._prepareForPayloadConsumption()
- if self._state == self._PARSING_PAYLOAD:
- self._consumePayload()
-
-
- def _consumeLength(self):
- """
- Consumes the length portion of C{self._remainingData}.
-
- @raise IncompleteNetstring: if C{self._remainingData} contains
- a partial length specification (digits without the trailing
- colon).
- @raise NetstringParseError: if the received data do not form a valid
- netstring.
- """
- lengthMatch = self._LENGTH.match(self._remainingData)
- if not lengthMatch:
- self._checkPartialLengthSpecification()
- raise IncompleteNetstring()
- self._processLength(lengthMatch)
-
-
- def _checkPartialLengthSpecification(self):
- """
- Makes sure that the received data represents a valid number.
-
- Checks if C{self._remainingData} represents a number smaller or
- equal to C{self.MAX_LENGTH}.
-
- @raise NetstringParseError: if C{self._remainingData} is not a
- number or is too big (checked by L{_extractLength}).
- """
- partialLengthMatch = self._LENGTH_PREFIX.match(self._remainingData)
- if not partialLengthMatch:
- raise NetstringParseError(self._MISSING_LENGTH)
- lengthSpecification = (partialLengthMatch.group(1))
- self._extractLength(lengthSpecification)
-
-
- def _processLength(self, lengthMatch):
- """
- Processes the length definition of a netstring.
-
- Extracts and stores in C{self._expectedPayloadSize} the number
- representing the netstring size. Removes the prefix
- representing the length specification from
- C{self._remainingData}.
-
- @raise NetstringParseError: if the received netstring does not
- start with a number or the number is bigger than
- C{self.MAX_LENGTH}.
- @param lengthMatch: A regular expression match object matching
- a netstring length specification
- @type lengthMatch: C{re.Match}
- """
- endOfNumber = lengthMatch.end(1)
- startOfData = lengthMatch.end(2)
- lengthString = self._remainingData[:endOfNumber]
- # Expect payload plus trailing comma:
- self._expectedPayloadSize = self._extractLength(lengthString) + 1
- self._remainingData = self._remainingData[startOfData:]
-
-
- def _extractLength(self, lengthAsString):
- """
- Attempts to extract the length information of a netstring.
-
- @raise NetstringParseError: if the number is bigger than
- C{self.MAX_LENGTH}.
- @param lengthAsString: A chunk of data starting with a length
- specification
- @type lengthAsString: C{str}
- @return: The length of the netstring
- @rtype: C{int}
- """
- self._checkStringSize(lengthAsString)
- length = int(lengthAsString)
- if length > self.MAX_LENGTH:
- raise NetstringParseError(self._TOO_LONG % (self.MAX_LENGTH,))
- return length
-
-
- def _checkStringSize(self, lengthAsString):
- """
- Checks the sanity of lengthAsString.
-
- Checks if the size of the length specification exceeds the
- size of the string representing self.MAX_LENGTH. If it does, the
- number represented by lengthAsString is certainly bigger than
- self.MAX_LENGTH, and a NetstringParseError is raised.
-
- This method should make sure that netstrings with extremely
- long length specifications are refused before even attempting
- to convert them to an integer (which might trigger a
- MemoryError).
- """
- if len(lengthAsString) > self._maxLengthSize():
- raise NetstringParseError(self._TOO_LONG % (self.MAX_LENGTH,))
-
-
- def _prepareForPayloadConsumption(self):
- """
- Sets up variables necessary for consuming the payload of a netstring.
- """
- self._state = self._PARSING_PAYLOAD
- self._currentPayloadSize = 0
- self._payload.seek(0)
- self._payload.truncate()
-
-
- def _consumePayload(self):
- """
- Consumes the payload portion of C{self._remainingData}.
-
- If the payload is complete, checks for the trailing comma and
- processes the payload. If not, raises an L{IncompleteNetstring}
- exception.
-
- @raise IncompleteNetstring: if the payload received so far
- contains fewer characters than expected.
- @raise NetstringParseError: if the payload does not end with a
- comma.
- """
- self._extractPayload()
- if self._currentPayloadSize < self._expectedPayloadSize:
- raise IncompleteNetstring()
- self._checkForTrailingComma()
- self._state = self._PARSING_LENGTH
- self._processPayload()
-
-
- def _extractPayload(self):
- """
- Extracts payload information from C{self._remainingData}.
-
- Splits C{self._remainingData} at the end of the netstring. The
- first part becomes C{self._payload}, the second part is stored
- in C{self._remainingData}.
-
- If the netstring is not yet complete, the whole content of
- C{self._remainingData} is moved to C{self._payload}.
- """
- if self._payloadComplete():
- remainingPayloadSize = (self._expectedPayloadSize -
- self._currentPayloadSize)
- self._payload.write(self._remainingData[:remainingPayloadSize])
- self._remainingData = self._remainingData[remainingPayloadSize:]
- self._currentPayloadSize = self._expectedPayloadSize
- else:
- self._payload.write(self._remainingData)
- self._currentPayloadSize += len(self._remainingData)
- self._remainingData = ""
-
-
- def _payloadComplete(self):
- """
- Checks if enough data have been received to complete the netstring.
-
- @return: C{True} iff the received data contain at least as many
- characters as specified in the length section of the
- netstring
- @rtype: C{bool}
- """
- return (len(self._remainingData) + self._currentPayloadSize >=
- self._expectedPayloadSize)
-
-
- def _processPayload(self):
- """
- Processes the actual payload with L{stringReceived}.
-
- Strips C{self._payload} of the trailing comma and calls
- L{stringReceived} with the result.
- """
- self.stringReceived(self._payload.getvalue()[:-1])
-
-
- def _checkForTrailingComma(self):
- """
- Checks if the netstring has a trailing comma at the expected position.
-
- @raise NetstringParseError: if the last payload character is
- anything but a comma.
- """
- if self._payload.getvalue()[-1] != ",":
- raise NetstringParseError(self._MISSING_COMMA)
-
-
- def _handleParseError(self):
- """
- Terminates the connection and sets the flag C{self.brokenPeer}.
- """
- self.transport.loseConnection()
- self.brokenPeer = 1
-
-
-
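A minimal sketch of the netstring framing described above; the EchoNetstrings name is invented for illustration.

from twisted.protocols import basic

class EchoNetstrings(basic.NetstringReceiver):
    def stringReceived(self, string):
        # sendString() adds the '<length>:' prefix and the trailing comma,
        # so echoing 'hello' puts '5:hello,' on the wire.
        self.sendString(string)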
-class LineOnlyReceiver(protocol.Protocol):
- """
- A protocol that receives only lines.
-
- This is purely a speed optimisation over LineReceiver, for the
- cases that raw mode is known to be unnecessary.
-
- @cvar delimiter: The line-ending delimiter to use. By default this is
- '\\r\\n'.
- @cvar MAX_LENGTH: The maximum length of a line to allow (If a
- sent line is longer than this, the connection is dropped).
- Default is 16384.
- """
- _buffer = ''
- delimiter = '\r\n'
- MAX_LENGTH = 16384
-
- def dataReceived(self, data):
- """
- Translates bytes into lines, and calls lineReceived.
- """
- lines = (self._buffer+data).split(self.delimiter)
- self._buffer = lines.pop(-1)
- for line in lines:
- if self.transport.disconnecting:
- # this is necessary because the transport may be told to lose
- # the connection by a line within a larger packet, and it is
- # important to disregard all the lines in that packet following
- # the one that told it to close.
- return
- if len(line) > self.MAX_LENGTH:
- return self.lineLengthExceeded(line)
- else:
- self.lineReceived(line)
- if len(self._buffer) > self.MAX_LENGTH:
- return self.lineLengthExceeded(self._buffer)
-
-
- def lineReceived(self, line):
- """
- Override this for when each line is received.
-
- @param line: The line which was received with the delimiter removed.
- @type line: C{str}
- """
- raise NotImplementedError
-
-
- def sendLine(self, line):
- """
- Sends a line to the other end of the connection.
-
- @param line: The line to send, not including the delimiter.
- @type line: C{str}
- """
- return self.transport.writeSequence((line, self.delimiter))
-
-
- def lineLengthExceeded(self, line):
- """
- Called when the maximum line length has been reached.
- Override if it needs to be dealt with in some special way.
- """
- return error.ConnectionLost('Line length exceeded')
-
-
-
-class _PauseableMixin:
- paused = False
-
- def pauseProducing(self):
- self.paused = True
- self.transport.pauseProducing()
-
-
- def resumeProducing(self):
- self.paused = False
- self.transport.resumeProducing()
- self.dataReceived('')
-
-
- def stopProducing(self):
- self.paused = True
- self.transport.stopProducing()
-
-
-
-class LineReceiver(protocol.Protocol, _PauseableMixin):
- """
- A protocol that receives lines and/or raw data, depending on mode.
-
- In line mode, each line that's received becomes a callback to
- L{lineReceived}. In raw data mode, each chunk of raw data becomes a
- callback to L{rawDataReceived}. The L{setLineMode} and L{setRawMode}
- methods switch between the two modes.
-
- This is useful for line-oriented protocols such as IRC, HTTP, POP, etc.
-
- @cvar delimiter: The line-ending delimiter to use. By default this is
- '\\r\\n'.
- @cvar MAX_LENGTH: The maximum length of a line to allow (If a
- sent line is longer than this, the connection is dropped).
- Default is 16384.
- """
- line_mode = 1
- __buffer = ''
- delimiter = '\r\n'
- MAX_LENGTH = 16384
-
- def clearLineBuffer(self):
- """
- Clear buffered data.
-
- @return: All of the cleared buffered data.
- @rtype: C{str}
- """
- b = self.__buffer
- self.__buffer = ""
- return b
-
-
- def dataReceived(self, data):
- """
- Protocol.dataReceived.
- Translates bytes into lines, and calls lineReceived (or
- rawDataReceived, depending on mode.)
- """
- self.__buffer = self.__buffer+data
- while self.line_mode and not self.paused:
- try:
- line, self.__buffer = self.__buffer.split(self.delimiter, 1)
- except ValueError:
- if len(self.__buffer) > self.MAX_LENGTH:
- line, self.__buffer = self.__buffer, ''
- return self.lineLengthExceeded(line)
- break
- else:
- linelength = len(line)
- if linelength > self.MAX_LENGTH:
- exceeded = line + self.__buffer
- self.__buffer = ''
- return self.lineLengthExceeded(exceeded)
- why = self.lineReceived(line)
- if why or self.transport and self.transport.disconnecting:
- return why
- else:
- if not self.paused:
- data=self.__buffer
- self.__buffer=''
- if data:
- return self.rawDataReceived(data)
-
-
- def setLineMode(self, extra=''):
- """
- Sets the line-mode of this receiver.
-
- If you are calling this from a rawDataReceived callback,
- you can pass in extra unhandled data, and that data will
- be parsed for lines. Further data received will be sent
- to lineReceived rather than rawDataReceived.
-
- Do not pass extra data if calling this function from
- within a lineReceived callback.
- """
- self.line_mode = 1
- if extra:
- return self.dataReceived(extra)
-
-
- def setRawMode(self):
- """
- Sets the raw mode of this receiver.
- Further data received will be sent to rawDataReceived rather
- than lineReceived.
- """
- self.line_mode = 0
-
-
- def rawDataReceived(self, data):
- """
- Override this for when raw data is received.
- """
- raise NotImplementedError
-
-
- def lineReceived(self, line):
- """
- Override this for when each line is received.
-
- @param line: The line which was received with the delimiter removed.
- @type line: C{str}
- """
- raise NotImplementedError
-
-
- def sendLine(self, line):
- """
- Sends a line to the other end of the connection.
-
- @param line: The line to send, not including the delimiter.
- @type line: C{str}
- """
- return self.transport.write(line + self.delimiter)
-
-
- def lineLengthExceeded(self, line):
- """
- Called when the maximum line length has been reached.
- Override if it needs to be dealt with in some special way.
-
- The argument 'line' contains the remainder of the buffer, starting
- with (at least some part) of the line which is too long. This may
- be more than one line, or may be only the initial portion of the
- line.
- """
- return self.transport.loseConnection()
-
-
-
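A sketch of the two modes described in LineReceiver's docstring; the protocol and its toy 'LENGTH <n>' header are made up for illustration.

from twisted.protocols import basic

class HeaderThenBody(basic.LineReceiver):
    def __init__(self):
        self._bodyRemaining = 0

    def lineReceived(self, line):
        if line.startswith('LENGTH '):
            # Switch to raw mode for the announced number of body bytes.
            self._bodyRemaining = int(line.split(' ', 1)[1])
            self.setRawMode()

    def rawDataReceived(self, data):
        body, extra = data[:self._bodyRemaining], data[self._bodyRemaining:]
        self._bodyRemaining -= len(body)
        if self._bodyRemaining == 0:
            # Any leftover bytes are re-parsed as lines.
            self.setLineMode(extra)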
-class StringTooLongError(AssertionError):
- """
- Raised when trying to send a string too long for a length prefixed
- protocol.
- """
-
-
-
-class _RecvdCompatHack(object):
- """
- Emulates the to-be-deprecated C{IntNStringReceiver.recvd} attribute.
-
- The C{recvd} attribute was where the working buffer for buffering and
- parsing netstrings was kept. It was updated each time new data arrived and
- each time some of that data was parsed and delivered to application code.
- The piecemeal updates to its string value were expensive and have been
- removed from C{IntNStringReceiver} in the normal case. However, for
- applications directly reading this attribute, this descriptor restores that
- behavior. It only copies the working buffer when necessary (ie, when
- accessed). This avoids the cost for applications not using the data.
-
- This is a custom descriptor rather than a property, because we still need
- the default __set__ behavior in both new-style and old-style subclasses.
- """
- def __get__(self, oself, type=None):
- return oself._unprocessed[oself._compatibilityOffset:]
-
-
-
-class IntNStringReceiver(protocol.Protocol, _PauseableMixin):
- """
- Generic class for length prefixed protocols.
-
- @ivar _unprocessed: bytes received, but not yet broken up into messages /
- sent to stringReceived. _compatibilityOffset must be updated when this
- value is updated so that the C{recvd} attribute can be generated
- correctly.
- @type _unprocessed: C{bytes}
-
- @ivar structFormat: format used for struct packing/unpacking. Define it in
- subclass.
- @type structFormat: C{str}
-
- @ivar prefixLength: length of the prefix, in bytes. Define it in subclass,
- using C{struct.calcsize(structFormat)}
- @type prefixLength: C{int}
-
- @ivar _compatibilityOffset: the offset within C{_unprocessed} to the next
- message to be parsed. (used to generate the recvd attribute)
- @type _compatibilityOffset: C{int}
- """
-
- MAX_LENGTH = 99999
- _unprocessed = ""
- _compatibilityOffset = 0
-
- # Backwards compatibility support for applications which directly touch the
- # "internal" parse buffer.
- recvd = _RecvdCompatHack()
-
- def stringReceived(self, string):
- """
- Override this for notification when each complete string is received.
-
- @param string: The complete string which was received with all
- framing (length prefix, etc) removed.
- @type string: C{str}
- """
- raise NotImplementedError
-
-
- def lengthLimitExceeded(self, length):
- """
- Callback invoked when a length prefix greater than C{MAX_LENGTH} is
- received. The default implementation disconnects the transport.
- Override this.
-
- @param length: The length prefix which was received.
- @type length: C{int}
- """
- self.transport.loseConnection()
-
-
- def dataReceived(self, data):
- """
- Convert int prefixed strings into calls to stringReceived.
- """
- # Try to minimize string copying (via slices) by keeping one buffer
- # containing all the data we have so far and a separate offset into that
- # buffer.
- alldata = self._unprocessed + data
- currentOffset = 0
- prefixLength = self.prefixLength
- fmt = self.structFormat
- self._unprocessed = alldata
-
- while len(alldata) >= (currentOffset + prefixLength) and not self.paused:
- messageStart = currentOffset + prefixLength
- length, = unpack(fmt, alldata[currentOffset:messageStart])
- if length > self.MAX_LENGTH:
- self._unprocessed = alldata
- self._compatibilityOffset = currentOffset
- self.lengthLimitExceeded(length)
- return
- messageEnd = messageStart + length
- if len(alldata) < messageEnd:
- break
-
- # Here we have to slice the working buffer so we can send just the
- # netstring into the stringReceived callback.
- packet = alldata[messageStart:messageEnd]
- currentOffset = messageEnd
- self._compatibilityOffset = currentOffset
- self.stringReceived(packet)
-
- # Check to see if the backwards compat "recvd" attribute got written
- # to by application code. If so, drop the current data buffer and
- # switch to the new buffer given by that attribute's value.
- if 'recvd' in self.__dict__:
- alldata = self.__dict__.pop('recvd')
- self._unprocessed = alldata
- self._compatibilityOffset = currentOffset = 0
- if alldata:
- continue
- return
-
- # Slice off all the data that has been processed, avoiding holding onto
- # memory to store it, and update the compatibility attributes to reflect
- # that change.
- self._unprocessed = alldata[currentOffset:]
- self._compatibilityOffset = 0
-
-
- def sendString(self, string):
- """
- Send a prefixed string to the other end of the connection.
-
- @param string: The string to send. The necessary framing (length
- prefix, etc) will be added.
- @type string: C{str}
- """
- if len(string) >= 2 ** (8 * self.prefixLength):
- raise StringTooLongError(
- "Try to send %s bytes whereas maximum is %s" % (
- len(string), 2 ** (8 * self.prefixLength)))
- self.transport.write(
- pack(self.structFormat, len(string)) + string)
-
-
-
-class Int32StringReceiver(IntNStringReceiver):
- """
- A receiver for int32-prefixed strings.
-
- An int32 string is a string prefixed by 4 bytes, the 32-bit length of
- the string encoded in network byte order.
-
- This class publishes the same interface as NetstringReceiver.
- """
- structFormat = "!I"
- prefixLength = calcsize(structFormat)
-
-
-
-class Int16StringReceiver(IntNStringReceiver):
- """
- A receiver for int16-prefixed strings.
-
- An int16 string is a string prefixed by 2 bytes, the 16-bit length of
- the string encoded in network byte order.
-
- This class publishes the same interface as NetstringReceiver.
- """
- structFormat = "!H"
- prefixLength = calcsize(structFormat)
-
-
-
-class Int8StringReceiver(IntNStringReceiver):
- """
- A receiver for int8-prefixed strings.
-
- An int8 string is a string prefixed by 1 byte, the 8-bit length of
- the string.
-
- This class publishes the same interface as NetstringReceiver.
- """
- structFormat = "!B"
- prefixLength = calcsize(structFormat)
-
-
-
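As a sketch of the structFormat/prefixLength framing these subclasses configure; Echo32 is illustrative only.

from twisted.protocols import basic

class Echo32(basic.Int32StringReceiver):
    def stringReceived(self, string):
        self.sendString(string)

# sendString('hello') writes struct.pack('!I', 5) + 'hello', i.e. the bytes
# '\x00\x00\x00\x05hello'; dataReceived() reverses the process.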
-class StatefulStringProtocol:
- """
- A stateful string protocol.
-
- This is a mixin for string protocols (Int32StringReceiver,
- NetstringReceiver) which translates stringReceived into a callback
- (prefixed with 'proto_') depending on state.
-
- The state 'done' is special; if a proto_* method returns it, the
- connection will be closed immediately.
- """
-
- state = 'init'
-
- def stringReceived(self, string):
- """
- Choose a protocol phase function and call it.
-
- Call back to the appropriate protocol phase; this begins with
- the function proto_init and moves on to proto_* depending on
- what each proto_* function returns. (For example, if
- self.proto_init returns 'foo', then self.proto_foo will be the
- next function called when a protocol message is received.)
- """
- try:
- pto = 'proto_'+self.state
- statehandler = getattr(self,pto)
- except AttributeError:
- log.msg('callback',self.state,'not found')
- else:
- self.state = statehandler(string)
- if self.state == 'done':
- self.transport.loseConnection()
-
-
-
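A sketch of the proto_* dispatch described above, mixed into an int16-prefixed receiver the same way BinaryBoxProtocol combines these classes; the TwoStep protocol is hypothetical.

from twisted.protocols import basic

class TwoStep(basic.StatefulStringProtocol, basic.Int16StringReceiver):
    # stringReceived() calls proto_init() for the first string; each
    # handler's return value names the next state, and 'done' drops
    # the connection.
    def proto_init(self, string):
        self.command = string
        return 'payload'

    def proto_payload(self, string):
        self.lastRequest = (self.command, string)
        return 'done'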
-class FileSender:
- """
- A producer that sends the contents of a file to a consumer.
-
- This is a helper for protocols that, at some point, will take a
- file-like object, read its contents, and write them out to the network,
- optionally performing some transformation on the bytes in between.
- """
- implements(interfaces.IProducer)
-
- CHUNK_SIZE = 2 ** 14
-
- lastSent = ''
- deferred = None
-
- def beginFileTransfer(self, file, consumer, transform = None):
- """
- Begin transferring a file
-
- @type file: Any file-like object
- @param file: The file object to read data from
-
- @type consumer: Any implementor of IConsumer
- @param consumer: The object to write data to
-
- @param transform: A callable taking one string argument and returning
- the same. All bytes read from the file are passed through this before
- being written to the consumer.
-
- @rtype: C{Deferred}
- @return: A deferred whose callback will be invoked when the file has
- been completely written to the consumer. The last byte written to the
- consumer is passed to the callback.
- """
- self.file = file
- self.consumer = consumer
- self.transform = transform
-
- self.deferred = deferred = defer.Deferred()
- self.consumer.registerProducer(self, False)
- return deferred
-
-
- def resumeProducing(self):
- chunk = ''
- if self.file:
- chunk = self.file.read(self.CHUNK_SIZE)
- if not chunk:
- self.file = None
- self.consumer.unregisterProducer()
- if self.deferred:
- self.deferred.callback(self.lastSent)
- self.deferred = None
- return
-
- if self.transform:
- chunk = self.transform(chunk)
- self.consumer.write(chunk)
- self.lastSent = chunk[-1]
-
-
- def pauseProducing(self):
- pass
-
-
- def stopProducing(self):
- if self.deferred:
- self.deferred.errback(
- Exception("Consumer asked us to stop producing"))
- self.deferred = None
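A sketch of driving FileSender from a protocol, using only the API shown above; pushFile is a hypothetical helper.

from twisted.protocols.basic import FileSender

def pushFile(transport, fileObject):
    # FileSender registers itself as the consumer's producer and fires the
    # returned Deferred with the last byte written once the file is done.
    sender = FileSender()
    d = sender.beginFileTransfer(fileObject, transport)
    def closeIt(passthrough):
        fileObject.close()
        return passthrough
    d.addBoth(closeIt)
    return d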
diff --git a/lib/python2.7/site-packages/Twisted-12.2.0-py2.7-linux-x86_64.egg/twisted/protocols/dict.py b/lib/python2.7/site-packages/Twisted-12.2.0-py2.7-linux-x86_64.egg/twisted/protocols/dict.py
deleted file mode 100755
index c3af402d..00000000
--- a/lib/python2.7/site-packages/Twisted-12.2.0-py2.7-linux-x86_64.egg/twisted/protocols/dict.py
+++ /dev/null
@@ -1,362 +0,0 @@
-# Copyright (c) Twisted Matrix Laboratories.
-# See LICENSE for details.
-
-
-"""
-Dict client protocol implementation.
-
-@author: Pavel Pergamenshchik
-"""
-
-from twisted.protocols import basic
-from twisted.internet import defer, protocol
-from twisted.python import log
-from StringIO import StringIO
-
-def parseParam(line):
- """Chew one dqstring or atom from beginning of line and return (param, remaningline)"""
- if line == '':
- return (None, '')
- elif line[0] != '"': # atom
- mode = 1
- else: # dqstring
- mode = 2
- res = ""
- io = StringIO(line)
- if mode == 2: # skip the opening quote
- io.read(1)
- while 1:
- a = io.read(1)
- if a == '"':
- if mode == 2:
- io.read(1) # skip the separating space
- return (res, io.read())
- elif a == '\\':
- a = io.read(1)
- if a == '':
- return (None, line) # unexpected end of string
- elif a == '':
- if mode == 1:
- return (res, io.read())
- else:
- return (None, line) # unexpected end of string
- elif a == ' ':
- if mode == 1:
- return (res, io.read())
- res += a
-
-def makeAtom(line):
- """Munch a string into an 'atom'"""
- # FIXME: proper quoting
- return filter(lambda x: not (x in map(chr, range(33)+[34, 39, 92])), line)
-
-def makeWord(s):
- mustquote = range(33)+[34, 39, 92]
- result = []
- for c in s:
- if ord(c) in mustquote:
- result.append("\\")
- result.append(c)
- s = "".join(result)
- return s
-
-def parseText(line):
- if len(line) == 1 and line == '.':
- return None
- else:
- if len(line) > 1 and line[0:2] == '..':
- line = line[1:]
- return line
-
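To make the parsing rules above concrete, here is what these helpers yield for a typical 151 response payload (the values shown are illustrative):

    line = '"ice cream" wn "WordNet (r) 2.0"'
    word, rest = parseParam(line)     # word == 'ice cream'
    db, rest = parseParam(rest)       # db == 'wn'
    dbdesc, rest = parseParam(rest)   # dbdesc == 'WordNet (r) 2.0'

    parseText('..leading dot')        # returns '.leading dot' (dot-stuffing undone)
    parseText('.')                    # returns None, marking the end of the text block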
-class Definition:
- """A word definition"""
- def __init__(self, name, db, dbdesc, text):
- self.name = name
- self.db = db
- self.dbdesc = dbdesc
- self.text = text # list of strings not terminated by newline
-
-class DictClient(basic.LineReceiver):
- """dict (RFC2229) client"""
-
- data = None # multiline data
- MAX_LENGTH = 1024
- state = None
- mode = None
- result = None
- factory = None
-
- def __init__(self):
- self.data = None
- self.result = None
-
- def connectionMade(self):
- self.state = "conn"
- self.mode = "command"
-
- def sendLine(self, line):
- """Throw up if the line is longer than 1022 characters"""
- if len(line) > self.MAX_LENGTH - 2:
- raise ValueError("DictClient tried to send a too long line")
- basic.LineReceiver.sendLine(self, line)
-
- def lineReceived(self, line):
- try:
- line = line.decode("UTF-8")
- except UnicodeError: # garbage received, skip
- return
- if self.mode == "text": # we are receiving textual data
- code = "text"
- else:
- if len(line) < 4:
- log.msg("DictClient got invalid line from server -- %s" % line)
- self.protocolError("Invalid line from server")
- self.transport.loseConnection()
- return
- code = int(line[:3])
- line = line[4:]
- method = getattr(self, 'dictCode_%s_%s' % (code, self.state), self.dictCode_default)
- method(line)
-
- def dictCode_default(self, line):
- """Unkown message"""
- log.msg("DictClient got unexpected message from server -- %s" % line)
- self.protocolError("Unexpected server message")
- self.transport.loseConnection()
-
- def dictCode_221_ready(self, line):
- """We are about to get kicked off, do nothing"""
- pass
-
- def dictCode_220_conn(self, line):
- """Greeting message"""
- self.state = "ready"
- self.dictConnected()
-
- def dictCode_530_conn(self):
- self.protocolError("Access denied")
- self.transport.loseConnection()
-
- def dictCode_420_conn(self):
- self.protocolError("Server temporarily unavailable")
- self.transport.loseConnection()
-
- def dictCode_421_conn(self):
- self.protocolError("Server shutting down at operator request")
- self.transport.loseConnection()
-
- def sendDefine(self, database, word):
- """Send a dict DEFINE command"""
- assert self.state == "ready", "DictClient.sendDefine called when not in ready state"
- self.result = None # these two are just in case. In "ready" state, result and data
- self.data = None # should be None
- self.state = "define"
- command = "DEFINE %s %s" % (makeAtom(database.encode("UTF-8")), makeWord(word.encode("UTF-8")))
- self.sendLine(command)
-
- def sendMatch(self, database, strategy, word):
- """Send a dict MATCH command"""
- assert self.state == "ready", "DictClient.sendMatch called when not in ready state"
- self.result = None
- self.data = None
- self.state = "match"
- command = "MATCH %s %s %s" % (makeAtom(database), makeAtom(strategy), makeAtom(word))
- self.sendLine(command.encode("UTF-8"))
-
- def dictCode_550_define(self, line):
- """Invalid database"""
- self.mode = "ready"
- self.defineFailed("Invalid database")
-
- def dictCode_550_match(self, line):
- """Invalid database"""
- self.mode = "ready"
- self.matchFailed("Invalid database")
-
- def dictCode_551_match(self, line):
- """Invalid strategy"""
- self.mode = "ready"
- self.matchFailed("Invalid strategy")
-
- def dictCode_552_define(self, line):
- """No match"""
- self.mode = "ready"
- self.defineFailed("No match")
-
- def dictCode_552_match(self, line):
- """No match"""
- self.mode = "ready"
- self.matchFailed("No match")
-
- def dictCode_150_define(self, line):
- """n definitions retrieved"""
- self.result = []
-
- def dictCode_151_define(self, line):
- """Definition text follows"""
- self.mode = "text"
- (word, line) = parseParam(line)
- (db, line) = parseParam(line)
- (dbdesc, line) = parseParam(line)
- if not (word and db and dbdesc):
- self.protocolError("Invalid server response")
- self.transport.loseConnection()
- else:
- self.result.append(Definition(word, db, dbdesc, []))
- self.data = []
-
- def dictCode_152_match(self, line):
- """n matches found, text follows"""
- self.mode = "text"
- self.result = []
- self.data = []
-
- def dictCode_text_define(self, line):
- """A line of definition text received"""
- res = parseText(line)
- if res == None:
- self.mode = "command"
- self.result[-1].text = self.data
- self.data = None
- else:
- self.data.append(line)
-
- def dictCode_text_match(self, line):
- """One line of match text received"""
- def l(s):
- p1, t = parseParam(s)
- p2, t = parseParam(t)
- return (p1, p2)
- res = parseText(line)
- if res == None:
- self.mode = "command"
- self.result = map(l, self.data)
- self.data = None
- else:
- self.data.append(line)
-
- def dictCode_250_define(self, line):
- """ok"""
- t = self.result
- self.result = None
- self.state = "ready"
- self.defineDone(t)
-
- def dictCode_250_match(self, line):
- """ok"""
- t = self.result
- self.result = None
- self.state = "ready"
- self.matchDone(t)
-
- def protocolError(self, reason):
- """override to catch unexpected dict protocol conditions"""
- pass
-
- def dictConnected(self):
- """override to be notified when the server is ready to accept commands"""
- pass
-
- def defineFailed(self, reason):
- """override to catch reasonable failure responses to DEFINE"""
- pass
-
- def defineDone(self, result):
- """override to catch succesful DEFINE"""
- pass
-
- def matchFailed(self, reason):
- """override to catch resonable failure responses to MATCH"""
- pass
-
- def matchDone(self, result):
- """override to catch succesful MATCH"""
- pass
-
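Because lineReceived dispatches on the name 'dictCode_<code>_<state>', a subclass only needs to override the notification hooks above; a hypothetical logging client might look like:

    from twisted.protocols.dict import DictClient

    class LoggingDictClient(DictClient):
        def dictConnected(self):
            # the 220 greeting in state "conn" lands in dictCode_220_conn, which calls this
            self.sendDefine("wn", "python")

        def defineDone(self, result):
            for definition in result:
                print "%s (%s)" % (definition.name, definition.db)

        def defineFailed(self, reason):
            print "lookup failed:", reason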
-
-class InvalidResponse(Exception):
- pass
-
-
-class DictLookup(DictClient):
- """Utility class for a single dict transaction. To be used with DictLookupFactory"""
-
- def protocolError(self, reason):
- if not self.factory.done:
- self.factory.d.errback(InvalidResponse(reason))
- self.factory.clientDone()
-
- def dictConnected(self):
- if self.factory.queryType == "define":
- apply(self.sendDefine, self.factory.param)
- elif self.factory.queryType == "match":
- apply(self.sendMatch, self.factory.param)
-
- def defineFailed(self, reason):
- self.factory.d.callback([])
- self.factory.clientDone()
- self.transport.loseConnection()
-
- def defineDone(self, result):
- self.factory.d.callback(result)
- self.factory.clientDone()
- self.transport.loseConnection()
-
- def matchFailed(self, reason):
- self.factory.d.callback([])
- self.factory.clientDone()
- self.transport.loseConnection()
-
- def matchDone(self, result):
- self.factory.d.callback(result)
- self.factory.clientDone()
- self.transport.loseConnection()
-
-
-class DictLookupFactory(protocol.ClientFactory):
- """Utility factory for a single dict transaction"""
- protocol = DictLookup
- done = None
-
- def __init__(self, queryType, param, d):
- self.queryType = queryType
- self.param = param
- self.d = d
- self.done = 0
-
- def clientDone(self):
- """Called by client when done."""
- self.done = 1
- del self.d
-
- def clientConnectionFailed(self, connector, error):
- self.d.errback(error)
-
- def clientConnectionLost(self, connector, error):
- if not self.done:
- self.d.errback(error)
-
- def buildProtocol(self, addr):
- p = self.protocol()
- p.factory = self
- return p
-
-
-def define(host, port, database, word):
- """Look up a word using a dict server"""
- d = defer.Deferred()
- factory = DictLookupFactory("define", (database, word), d)
-
- from twisted.internet import reactor
- reactor.connectTCP(host, port, factory)
- return d
-
-def match(host, port, database, strategy, word):
- """Match a word using a dict server"""
- d = defer.Deferred()
- factory = DictLookupFactory("match", (database, strategy, word), d)
-
- from twisted.internet import reactor
- reactor.connectTCP(host, port, factory)
- return d
-
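A hedged usage sketch for these helpers (the host and the word looked up are placeholders; public DICT servers conventionally listen on port 2628, and "*" asks all databases):

    from twisted.internet import reactor
    from twisted.protocols import dict as dictprotocol

    def printDefinitions(definitions):
        for entry in definitions:
            print entry.name, "from", entry.dbdesc
            print "\n".join(entry.text)
        reactor.stop()

    d = dictprotocol.define("dict.example.org", 2628, "*", "twisted")
    d.addCallback(printDefinitions)
    d.addErrback(lambda failure: reactor.stop())
    reactor.run()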
diff --git a/lib/python2.7/site-packages/Twisted-12.2.0-py2.7-linux-x86_64.egg/twisted/protocols/finger.py b/lib/python2.7/site-packages/Twisted-12.2.0-py2.7-linux-x86_64.egg/twisted/protocols/finger.py
deleted file mode 100755
index fcb93967..00000000
--- a/lib/python2.7/site-packages/Twisted-12.2.0-py2.7-linux-x86_64.egg/twisted/protocols/finger.py
+++ /dev/null
@@ -1,42 +0,0 @@
-# Copyright (c) Twisted Matrix Laboratories.
-# See LICENSE for details.
-
-
-"""The Finger User Information Protocol (RFC 1288)"""
-
-from twisted.protocols import basic
-
-class Finger(basic.LineReceiver):
-
- def lineReceived(self, line):
- parts = line.split()
- if not parts:
- parts = ['']
- if len(parts) == 1:
- slash_w = 0
- else:
- slash_w = 1
- user = parts[-1]
- if '@' in user:
- host_place = user.rfind('@')
- host = user[host_place+1:]
- user = user[:host_place]
- return self.forwardQuery(slash_w, user, host)
- if user:
- return self.getUser(slash_w, user)
- else:
- return self.getDomain(slash_w)
-
- def _refuseMessage(self, message):
- self.transport.write(message+"\n")
- self.transport.loseConnection()
-
- def forwardQuery(self, slash_w, user, host):
- self._refuseMessage('Finger forwarding service denied')
-
- def getDomain(self, slash_w):
- self._refuseMessage('Finger online list denied')
-
- def getUser(self, slash_w, user):
- self.transport.write('Login: '+user+'\n')
- self._refuseMessage('No such user')
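Each hook above simply refuses the query; a minimal subclass that actually answers (the user data is invented for illustration) could look like:

    from twisted.internet import protocol, reactor
    from twisted.protocols import finger

    class NoticeFinger(finger.Finger):
        notices = {"alice": "Out to lunch."}   # hypothetical user data

        def getUser(self, slash_w, user):
            notice = self.notices.get(user, "No such user")
            self.transport.write("Login: %s\n%s\n" % (user, notice))
            self.transport.loseConnection()

    factory = protocol.ServerFactory()
    factory.protocol = NoticeFinger
    # reactor.listenTCP(79, factory)  # 79 is finger's well-known port (needs privileges)
    # reactor.run()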
diff --git a/lib/python2.7/site-packages/Twisted-12.2.0-py2.7-linux-x86_64.egg/twisted/protocols/ftp.py b/lib/python2.7/site-packages/Twisted-12.2.0-py2.7-linux-x86_64.egg/twisted/protocols/ftp.py
deleted file mode 100755
index b035f04e..00000000
--- a/lib/python2.7/site-packages/Twisted-12.2.0-py2.7-linux-x86_64.egg/twisted/protocols/ftp.py
+++ /dev/null
@@ -1,2955 +0,0 @@
-# -*- test-case-name: twisted.test.test_ftp -*-
-# Copyright (c) Twisted Matrix Laboratories.
-# See LICENSE for details.
-
-"""
-An FTP protocol implementation
-"""
-
-# System Imports
-import os
-import time
-import re
-import operator
-import stat
-import errno
-import fnmatch
-import warnings
-
-try:
- import pwd, grp
-except ImportError:
- pwd = grp = None
-
-from zope.interface import Interface, implements
-
-# Twisted Imports
-from twisted import copyright
-from twisted.internet import reactor, interfaces, protocol, error, defer
-from twisted.protocols import basic, policies
-
-from twisted.python import log, failure, filepath
-from twisted.python.compat import reduce
-
-from twisted.cred import error as cred_error, portal, credentials, checkers
-
-# constants
-# response codes
-
-RESTART_MARKER_REPLY = "100"
-SERVICE_READY_IN_N_MINUTES = "120"
-DATA_CNX_ALREADY_OPEN_START_XFR = "125"
-FILE_STATUS_OK_OPEN_DATA_CNX = "150"
-
-CMD_OK = "200.1"
-TYPE_SET_OK = "200.2"
-ENTERING_PORT_MODE = "200.3"
-CMD_NOT_IMPLMNTD_SUPERFLUOUS = "202"
-SYS_STATUS_OR_HELP_REPLY = "211"
-DIR_STATUS = "212"
-FILE_STATUS = "213"
-HELP_MSG = "214"
-NAME_SYS_TYPE = "215"
-SVC_READY_FOR_NEW_USER = "220.1"
-WELCOME_MSG = "220.2"
-SVC_CLOSING_CTRL_CNX = "221.1"
-GOODBYE_MSG = "221.2"
-DATA_CNX_OPEN_NO_XFR_IN_PROGRESS = "225"
-CLOSING_DATA_CNX = "226.1"
-TXFR_COMPLETE_OK = "226.2"
-ENTERING_PASV_MODE = "227"
-ENTERING_EPSV_MODE = "229"
-USR_LOGGED_IN_PROCEED = "230.1" # v1 of code 230
-GUEST_LOGGED_IN_PROCEED = "230.2" # v2 of code 230
-REQ_FILE_ACTN_COMPLETED_OK = "250"
-PWD_REPLY = "257.1"
-MKD_REPLY = "257.2"
-
-USR_NAME_OK_NEED_PASS = "331.1" # v1 of Code 331
-GUEST_NAME_OK_NEED_EMAIL = "331.2" # v2 of code 331
-NEED_ACCT_FOR_LOGIN = "332"
-REQ_FILE_ACTN_PENDING_FURTHER_INFO = "350"
-
-SVC_NOT_AVAIL_CLOSING_CTRL_CNX = "421.1"
-TOO_MANY_CONNECTIONS = "421.2"
-CANT_OPEN_DATA_CNX = "425"
-CNX_CLOSED_TXFR_ABORTED = "426"
-REQ_ACTN_ABRTD_FILE_UNAVAIL = "450"
-REQ_ACTN_ABRTD_LOCAL_ERR = "451"
-REQ_ACTN_ABRTD_INSUFF_STORAGE = "452"
-
-SYNTAX_ERR = "500"
-SYNTAX_ERR_IN_ARGS = "501"
-CMD_NOT_IMPLMNTD = "502"
-BAD_CMD_SEQ = "503"
-CMD_NOT_IMPLMNTD_FOR_PARAM = "504"
-NOT_LOGGED_IN = "530.1" # v1 of code 530 - please log in
-AUTH_FAILURE = "530.2" # v2 of code 530 - authorization failure
-NEED_ACCT_FOR_STOR = "532"
-FILE_NOT_FOUND = "550.1" # no such file or directory
-PERMISSION_DENIED = "550.2" # permission denied
-ANON_USER_DENIED = "550.3" # anonymous users can't alter filesystem
-IS_NOT_A_DIR = "550.4" # rmd called on a path that is not a directory
-REQ_ACTN_NOT_TAKEN = "550.5"
-FILE_EXISTS = "550.6"
-IS_A_DIR = "550.7"
-PAGE_TYPE_UNK = "551"
-EXCEEDED_STORAGE_ALLOC = "552"
-FILENAME_NOT_ALLOWED = "553"
-
-
-RESPONSE = {
- # -- 100's --
- RESTART_MARKER_REPLY: '110 MARK yyyy-mmmm', # TODO: this must be fixed
- SERVICE_READY_IN_N_MINUTES: '120 service ready in %s minutes',
- DATA_CNX_ALREADY_OPEN_START_XFR: '125 Data connection already open, starting transfer',
- FILE_STATUS_OK_OPEN_DATA_CNX: '150 File status okay; about to open data connection.',
-
- # -- 200's --
- CMD_OK: '200 Command OK',
- TYPE_SET_OK: '200 Type set to %s.',
- ENTERING_PORT_MODE: '200 PORT OK',
- CMD_NOT_IMPLMNTD_SUPERFLUOUS: '202 Command not implemented, superfluous at this site',
- SYS_STATUS_OR_HELP_REPLY: '211 System status reply',
- DIR_STATUS: '212 %s',
- FILE_STATUS: '213 %s',
- HELP_MSG: '214 help: %s',
- NAME_SYS_TYPE: '215 UNIX Type: L8',
- WELCOME_MSG: "220 %s",
- SVC_READY_FOR_NEW_USER: '220 Service ready',
- SVC_CLOSING_CTRL_CNX: '221 Service closing control connection',
- GOODBYE_MSG: '221 Goodbye.',
- DATA_CNX_OPEN_NO_XFR_IN_PROGRESS: '225 data connection open, no transfer in progress',
- CLOSING_DATA_CNX: '226 Abort successful',
- TXFR_COMPLETE_OK: '226 Transfer Complete.',
- ENTERING_PASV_MODE: '227 Entering Passive Mode (%s).',
- ENTERING_EPSV_MODE: '229 Entering Extended Passive Mode (|||%s|).', # where is epsv defined in the rfc's?
- USR_LOGGED_IN_PROCEED: '230 User logged in, proceed',
- GUEST_LOGGED_IN_PROCEED: '230 Anonymous login ok, access restrictions apply.',
- REQ_FILE_ACTN_COMPLETED_OK: '250 Requested File Action Completed OK', #i.e. CWD completed ok
- PWD_REPLY: '257 "%s"',
- MKD_REPLY: '257 "%s" created',
-
- # -- 300's --
- USR_NAME_OK_NEED_PASS: '331 Password required for %s.',
- GUEST_NAME_OK_NEED_EMAIL: '331 Guest login ok, type your email address as password.',
- NEED_ACCT_FOR_LOGIN: '332 Need account for login.',
-
- REQ_FILE_ACTN_PENDING_FURTHER_INFO: '350 Requested file action pending further information.',
-
- # -- 400's --
- SVC_NOT_AVAIL_CLOSING_CTRL_CNX: '421 Service not available, closing control connection.',
- TOO_MANY_CONNECTIONS: '421 Too many users right now, try again in a few minutes.',
- CANT_OPEN_DATA_CNX: "425 Can't open data connection.",
- CNX_CLOSED_TXFR_ABORTED: '426 Transfer aborted. Data connection closed.',
-
- REQ_ACTN_ABRTD_FILE_UNAVAIL: '450 Requested action aborted. File unavailable.',
- REQ_ACTN_ABRTD_LOCAL_ERR: '451 Requested action aborted. Local error in processing.',
- REQ_ACTN_ABRTD_INSUFF_STORAGE: '452 Requested action aborted. Insufficient storage.',
-
- # -- 500's --
- SYNTAX_ERR: "500 Syntax error: %s",
- SYNTAX_ERR_IN_ARGS: '501 syntax error in argument(s) %s.',
- CMD_NOT_IMPLMNTD: "502 Command '%s' not implemented",
- BAD_CMD_SEQ: '503 Incorrect sequence of commands: %s',
- CMD_NOT_IMPLMNTD_FOR_PARAM: "504 Not implemented for parameter '%s'.",
- NOT_LOGGED_IN: '530 Please login with USER and PASS.',
- AUTH_FAILURE: '530 Sorry, Authentication failed.',
- NEED_ACCT_FOR_STOR: '532 Need an account for storing files',
- FILE_NOT_FOUND: '550 %s: No such file or directory.',
- PERMISSION_DENIED: '550 %s: Permission denied.',
- ANON_USER_DENIED: '550 Anonymous users are forbidden to change the filesystem',
- IS_NOT_A_DIR: '550 Cannot rmd, %s is not a directory',
- FILE_EXISTS: '550 %s: File exists',
- IS_A_DIR: '550 %s: is a directory',
- REQ_ACTN_NOT_TAKEN: '550 Requested action not taken: %s',
- PAGE_TYPE_UNK: '551 Page type unknown',
- EXCEEDED_STORAGE_ALLOC: '552 Requested file action aborted, exceeded file storage allocation',
- FILENAME_NOT_ALLOWED: '553 Requested action not taken, file name not allowed'
-}
-
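The protocol interpreter later interpolates these templates when replying; the effect is essentially:

    # purely illustrative; FTP.reply(key, *args) performs the same lookup and formatting
    RESPONSE[TYPE_SET_OK] % 'I'              # '200 Type set to I.'
    RESPONSE[USR_NAME_OK_NEED_PASS] % 'bob'  # '331 Password required for bob.'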
-
-
-class InvalidPath(Exception):
- """
- Internal exception used to signify an error while parsing a path.
- """
-
-
-
-def toSegments(cwd, path):
- """
- Normalize a path, returning it as a list of strings, each representing
- one segment of the path.
- """
- if path.startswith('/'):
- segs = []
- else:
- segs = cwd[:]
-
- for s in path.split('/'):
- if s == '.' or s == '':
- continue
- elif s == '..':
- if segs:
- segs.pop()
- else:
- raise InvalidPath(cwd, path)
- elif '\0' in s or '/' in s:
- raise InvalidPath(cwd, path)
- else:
- segs.append(s)
- return segs
-
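Some illustrative inputs and the segment lists the function above produces:

    toSegments([], '/pub/files')          # ['pub', 'files']
    toSegments(['pub'], 'files/../docs')  # ['pub', 'docs']
    toSegments(['pub'], '/')              # []
    toSegments([], '../escape')           # raises InvalidPath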
-
-def errnoToFailure(e, path):
- """
- Map C{OSError} and C{IOError} to standard FTP errors.
- """
- if e == errno.ENOENT:
- return defer.fail(FileNotFoundError(path))
- elif e == errno.EACCES or e == errno.EPERM:
- return defer.fail(PermissionDeniedError(path))
- elif e == errno.ENOTDIR:
- return defer.fail(IsNotADirectoryError(path))
- elif e == errno.EEXIST:
- return defer.fail(FileExistsError(path))
- elif e == errno.EISDIR:
- return defer.fail(IsADirectoryError(path))
- else:
- return defer.fail()
-
-
-
-class FTPCmdError(Exception):
- """
- Generic exception for FTP commands.
- """
- def __init__(self, *msg):
- Exception.__init__(self, *msg)
- self.errorMessage = msg
-
-
- def response(self):
- """
- Generate a FTP response message for this error.
- """
- return RESPONSE[self.errorCode] % self.errorMessage
-
-
-
-class FileNotFoundError(FTPCmdError):
- """
- Raised when trying to access a nonexistent file or directory.
- """
- errorCode = FILE_NOT_FOUND
-
-
-
-class AnonUserDeniedError(FTPCmdError):
- """
- Raised when an anonymous user issues a command that will alter the
- filesystem
- """
-
- errorCode = ANON_USER_DENIED
-
-
-
-class PermissionDeniedError(FTPCmdError):
- """
- Raised when access is attempted to a resource to which access is
- not allowed.
- """
- errorCode = PERMISSION_DENIED
-
-
-
-class IsNotADirectoryError(FTPCmdError):
- """
- Raised when RMD is called on a path that isn't a directory.
- """
- errorCode = IS_NOT_A_DIR
-
-
-
-class FileExistsError(FTPCmdError):
- """
- Raised when an attempt is made to overwrite an existing resource.
- """
- errorCode = FILE_EXISTS
-
-
-
-class IsADirectoryError(FTPCmdError):
- """
- Raised when DELE is called on a path that is a directory.
- """
- errorCode = IS_A_DIR
-
-
-
-class CmdSyntaxError(FTPCmdError):
- """
- Raised when a command syntax is wrong.
- """
- errorCode = SYNTAX_ERR
-
-
-
-class CmdArgSyntaxError(FTPCmdError):
- """
- Raised when a command is called with a wrong value or a wrong number of
- arguments.
- """
- errorCode = SYNTAX_ERR_IN_ARGS
-
-
-
-class CmdNotImplementedError(FTPCmdError):
- """
- Raised when an unimplemented command is given to the server.
- """
- errorCode = CMD_NOT_IMPLMNTD
-
-
-
-class CmdNotImplementedForArgError(FTPCmdError):
- """
- Raised when the handling of a parameter for a command is not implemented by
- the server.
- """
- errorCode = CMD_NOT_IMPLMNTD_FOR_PARAM
-
-
-
-class FTPError(Exception):
- pass
-
-
-
-class PortConnectionError(Exception):
- pass
-
-
-
-class BadCmdSequenceError(FTPCmdError):
- """
- Raised when a client sends a series of commands in an illogical sequence.
- """
- errorCode = BAD_CMD_SEQ
-
-
-
-class AuthorizationError(FTPCmdError):
- """
- Raised when client authentication fails.
- """
- errorCode = AUTH_FAILURE
-
-
-
-def debugDeferred(self, *_):
- log.msg('debugDeferred(): %s' % str(_), debug=True)
-
-
-# -- DTP Protocol --
-
-
-_months = [
- None,
- 'Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun',
- 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
-
-
-class DTP(object, protocol.Protocol):
- implements(interfaces.IConsumer)
-
- isConnected = False
-
- _cons = None
- _onConnLost = None
- _buffer = None
-
- def connectionMade(self):
- self.isConnected = True
- self.factory.deferred.callback(None)
- self._buffer = []
-
- def connectionLost(self, reason):
- self.isConnected = False
- if self._onConnLost is not None:
- self._onConnLost.callback(None)
-
- def sendLine(self, line):
- self.transport.write(line + '\r\n')
-
-
- def _formatOneListResponse(self, name, size, directory, permissions, hardlinks, modified, owner, group):
- def formatMode(mode):
- return ''.join([mode & (256 >> n) and 'rwx'[n % 3] or '-' for n in range(9)])
-
- def formatDate(mtime):
- now = time.gmtime()
- info = {
- 'month': _months[mtime.tm_mon],
- 'day': mtime.tm_mday,
- 'year': mtime.tm_year,
- 'hour': mtime.tm_hour,
- 'minute': mtime.tm_min
- }
- if now.tm_year != mtime.tm_year:
- return '%(month)s %(day)02d %(year)5d' % info
- else:
- return '%(month)s %(day)02d %(hour)02d:%(minute)02d' % info
-
- format = ('%(directory)s%(permissions)s%(hardlinks)4d '
- '%(owner)-9s %(group)-9s %(size)15d %(date)12s '
- '%(name)s')
-
- return format % {
- 'directory': directory and 'd' or '-',
- 'permissions': formatMode(permissions),
- 'hardlinks': hardlinks,
- 'owner': owner[:8],
- 'group': group[:8],
- 'size': size,
- 'date': formatDate(time.gmtime(modified)),
- 'name': name}
-
- def sendListResponse(self, name, response):
- self.sendLine(self._formatOneListResponse(name, *response))
-
-
- # Proxy IConsumer to our transport
- def registerProducer(self, producer, streaming):
- return self.transport.registerProducer(producer, streaming)
-
- def unregisterProducer(self):
- self.transport.unregisterProducer()
- self.transport.loseConnection()
-
- def write(self, data):
- if self.isConnected:
- return self.transport.write(data)
- raise Exception("Crap damn crap damn crap damn")
-
-
- # Pretend to be a producer, too.
- def _conswrite(self, bytes):
- try:
- self._cons.write(bytes)
- except:
- self._onConnLost.errback()
-
- def dataReceived(self, bytes):
- if self._cons is not None:
- self._conswrite(bytes)
- else:
- self._buffer.append(bytes)
-
- def _unregConsumer(self, ignored):
- self._cons.unregisterProducer()
- self._cons = None
- del self._onConnLost
- return ignored
-
- def registerConsumer(self, cons):
- assert self._cons is None
- self._cons = cons
- self._cons.registerProducer(self, True)
- for chunk in self._buffer:
- self._conswrite(chunk)
- self._buffer = None
- if self.isConnected:
- self._onConnLost = d = defer.Deferred()
- d.addBoth(self._unregConsumer)
- return d
- else:
- self._cons.unregisterProducer()
- self._cons = None
- return defer.succeed(None)
-
- def resumeProducing(self):
- self.transport.resumeProducing()
-
- def pauseProducing(self):
- self.transport.pauseProducing()
-
- def stopProducing(self):
- self.transport.stopProducing()
-
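For reference, _formatOneListResponse above renders each stat tuple as an ls -l style line; a hypothetical entry (permission bits given in octal, timestamp in seconds since the epoch) comes out roughly as:

    # name, size, directory, permissions, hardlinks, modified, owner, group
    DTP()._formatOneListResponse('notes.txt', 1432, False, 0644, 1, 1325376000, 'alice', 'users')
    # => something like:
    # '-rw-r--r--   1 alice     users                1432 Jan 01  2012 notes.txt'
    # (entries modified in the current year show HH:MM in place of the year)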
-class DTPFactory(protocol.ClientFactory):
- """
- Client factory for I{data transfer process} protocols.
-
- @ivar peerCheck: perform checks to make sure the ftp-pi's peer is the same
- as the dtp's
- @ivar pi: a reference to this factory's protocol interpreter
-
- @ivar _state: Indicates the current state of the DTPFactory. Initially,
- this is L{_IN_PROGRESS}. If the connection fails or times out, it is
- L{_FAILED}. If the connection succeeds before the timeout, it is
- L{_FINISHED}.
- """
-
- _IN_PROGRESS = object()
- _FAILED = object()
- _FINISHED = object()
-
- _state = _IN_PROGRESS
-
- # -- configuration variables --
- peerCheck = False
-
- # -- class variables --
- def __init__(self, pi, peerHost=None, reactor=None):
- """Constructor
- @param pi: this factory's protocol interpreter
- @param peerHost: if peerCheck is True, this is the tuple that the
- generated instance will use to perform security checks
- """
- self.pi = pi # the protocol interpreter that is using this factory
- self.peerHost = peerHost # the peer host, taken from the FTP instance's transport
- self.deferred = defer.Deferred() # deferred will fire when instance is connected
- self.delayedCall = None
- if reactor is None:
- from twisted.internet import reactor
- self._reactor = reactor
-
-
- def buildProtocol(self, addr):
- log.msg('DTPFactory.buildProtocol', debug=True)
-
- if self._state is not self._IN_PROGRESS:
- return None
- self._state = self._FINISHED
-
- self.cancelTimeout()
- p = DTP()
- p.factory = self
- p.pi = self.pi
- self.pi.dtpInstance = p
- return p
-
-
- def stopFactory(self):
- log.msg('dtpFactory.stopFactory', debug=True)
- self.cancelTimeout()
-
-
- def timeoutFactory(self):
- log.msg('timed out waiting for DTP connection')
- if self._state is not self._IN_PROGRESS:
- return
- self._state = self._FAILED
-
- d = self.deferred
- self.deferred = None
- d.errback(
- PortConnectionError(defer.TimeoutError("DTPFactory timeout")))
-
-
- def cancelTimeout(self):
- if self.delayedCall is not None and self.delayedCall.active():
- log.msg('cancelling DTP timeout', debug=True)
- self.delayedCall.cancel()
-
-
- def setTimeout(self, seconds):
- log.msg('DTPFactory.setTimeout set to %s seconds' % seconds)
- self.delayedCall = self._reactor.callLater(seconds, self.timeoutFactory)
-
-
- def clientConnectionFailed(self, connector, reason):
- if self._state is not self._IN_PROGRESS:
- return
- self._state = self._FAILED
- d = self.deferred
- self.deferred = None
- d.errback(PortConnectionError(reason))
-
-
-# -- FTP-PI (Protocol Interpreter) --
-
-class ASCIIConsumerWrapper(object):
- def __init__(self, cons):
- self.cons = cons
- self.registerProducer = cons.registerProducer
- self.unregisterProducer = cons.unregisterProducer
-
- assert os.linesep == "\r\n" or len(os.linesep) == 1, "Unsupported platform (yea right like this even exists)"
-
- if os.linesep == "\r\n":
- self.write = cons.write
-
- def write(self, bytes):
- return self.cons.write(bytes.replace(os.linesep, "\r\n"))
-
-
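In ASCII mode this wrapper rewrites the platform line separator on the way to the consumer; on a POSIX system (os.linesep == '\n') the effect is:

    # someConsumer is any IConsumer provider; purely illustrative
    wrapper = ASCIIConsumerWrapper(someConsumer)
    wrapper.write("one\ntwo\n")   # someConsumer.write receives "one\r\ntwo\r\n"
    # on platforms where os.linesep is already "\r\n", write is bound directly to cons.write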
-
-class FileConsumer(object):
- """
- A consumer for FTP input that writes data to a file.
-
- @ivar fObj: a file object opened for writing, used to write data received.
- @type fObj: C{file}
- """
-
- implements(interfaces.IConsumer)
-
- def __init__(self, fObj):
- self.fObj = fObj
-
-
- def registerProducer(self, producer, streaming):
- self.producer = producer
- assert streaming
-
-
- def unregisterProducer(self):
- self.producer = None
- self.fObj.close()
-
-
- def write(self, bytes):
- self.fObj.write(bytes)
-
-
-
-class FTPOverflowProtocol(basic.LineReceiver):
- """FTP mini-protocol for when there are too many connections."""
- def connectionMade(self):
- self.sendLine(RESPONSE[TOO_MANY_CONNECTIONS])
- self.transport.loseConnection()
-
-
-class FTP(object, basic.LineReceiver, policies.TimeoutMixin):
- """
- Protocol Interpreter for the File Transfer Protocol
-
- @ivar state: The current server state. One of L{UNAUTH},
- L{INAUTH}, L{AUTHED}, L{RENAMING}.
-
- @ivar shell: The connected avatar
- @ivar binary: The transfer mode. If false, ASCII.
- @ivar dtpFactory: Generates a single DTP for this session
- @ivar dtpPort: Port returned from listenTCP
- @ivar listenFactory: A callable with the signature of
- L{twisted.internet.interfaces.IReactorTCP.listenTCP} which will be used
- to create Ports for passive connections (mainly for testing).
-
- @ivar passivePortRange: iterator used as source of passive port numbers.
- @type passivePortRange: C{iterator}
- """
-
- disconnected = False
-
- # States an FTP can be in
- UNAUTH, INAUTH, AUTHED, RENAMING = range(4)
-
- # how long the DTP waits for a connection
- dtpTimeout = 10
-
- portal = None
- shell = None
- dtpFactory = None
- dtpPort = None
- dtpInstance = None
- binary = True
-
- passivePortRange = xrange(0, 1)
-
- listenFactory = reactor.listenTCP
-
- def reply(self, key, *args):
- msg = RESPONSE[key] % args
- self.sendLine(msg)
-
-
- def connectionMade(self):
- self.state = self.UNAUTH
- self.setTimeout(self.timeOut)
- self.reply(WELCOME_MSG, self.factory.welcomeMessage)
-
- def connectionLost(self, reason):
- # if we have a DTP protocol instance running and
- # we lose connection to the client's PI, kill the
- # DTP connection and close the port
- if self.dtpFactory:
- self.cleanupDTP()
- self.setTimeout(None)
- if hasattr(self.shell, 'logout') and self.shell.logout is not None:
- self.shell.logout()
- self.shell = None
- self.transport = None
-
- def timeoutConnection(self):
- self.transport.loseConnection()
-
- def lineReceived(self, line):
- self.resetTimeout()
- self.pauseProducing()
-
- def processFailed(err):
- if err.check(FTPCmdError):
- self.sendLine(err.value.response())
- elif (err.check(TypeError) and
- err.value.args[0].find('takes exactly') != -1):
- self.reply(SYNTAX_ERR, "%s requires an argument." % (cmd,))
- else:
- log.msg("Unexpected FTP error")
- log.err(err)
- self.reply(REQ_ACTN_NOT_TAKEN, "internal server error")
-
- def processSucceeded(result):
- if isinstance(result, tuple):
- self.reply(*result)
- elif result is not None:
- self.reply(result)
-
- def allDone(ignored):
- if not self.disconnected:
- self.resumeProducing()
-
- spaceIndex = line.find(' ')
- if spaceIndex != -1:
- cmd = line[:spaceIndex]
- args = (line[spaceIndex + 1:],)
- else:
- cmd = line
- args = ()
- d = defer.maybeDeferred(self.processCommand, cmd, *args)
- d.addCallbacks(processSucceeded, processFailed)
- d.addErrback(log.err)
-
- # XXX It burnsss
- # LineReceiver doesn't let you resumeProducing inside
- # lineReceived atm
- from twisted.internet import reactor
- reactor.callLater(0, d.addBoth, allDone)
-
-
- def processCommand(self, cmd, *params):
- cmd = cmd.upper()
-
- if self.state == self.UNAUTH:
- if cmd == 'USER':
- return self.ftp_USER(*params)
- elif cmd == 'PASS':
- return BAD_CMD_SEQ, "USER required before PASS"
- else:
- return NOT_LOGGED_IN
-
- elif self.state == self.INAUTH:
- if cmd == 'PASS':
- return self.ftp_PASS(*params)
- else:
- return BAD_CMD_SEQ, "PASS required after USER"
-
- elif self.state == self.AUTHED:
- method = getattr(self, "ftp_" + cmd, None)
- if method is not None:
- return method(*params)
- return defer.fail(CmdNotImplementedError(cmd))
-
- elif self.state == self.RENAMING:
- if cmd == 'RNTO':
- return self.ftp_RNTO(*params)
- else:
- return BAD_CMD_SEQ, "RNTO required after RNFR"
-
-
- def getDTPPort(self, factory):
- """
- Return a port for passive access, using C{self.passivePortRange}
- attribute.
- """
- for portn in self.passivePortRange:
- try:
- dtpPort = self.listenFactory(portn, factory)
- except error.CannotListenError:
- continue
- else:
- return dtpPort
- raise error.CannotListenError('', portn,
- "No port available in range %s" %
- (self.passivePortRange,))
-
-
- def ftp_USER(self, username):
- """
- First part of login. Get the username the peer wants to
- authenticate as.
- """
- if not username:
- return defer.fail(CmdSyntaxError('USER requires an argument'))
-
- self._user = username
- self.state = self.INAUTH
- if self.factory.allowAnonymous and self._user == self.factory.userAnonymous:
- return GUEST_NAME_OK_NEED_EMAIL
- else:
- return (USR_NAME_OK_NEED_PASS, username)
-
- # TODO: add max auth try before timeout from ip...
- # TODO: need to implement minimal ABOR command
-
- def ftp_PASS(self, password):
- """
- Second part of login. Get the password the peer wants to
- authenticate with.
- """
- if self.factory.allowAnonymous and self._user == self.factory.userAnonymous:
- # anonymous login
- creds = credentials.Anonymous()
- reply = GUEST_LOGGED_IN_PROCEED
- else:
- # user login
- creds = credentials.UsernamePassword(self._user, password)
- reply = USR_LOGGED_IN_PROCEED
- del self._user
-
- def _cbLogin((interface, avatar, logout)):
- assert interface is IFTPShell, "The realm is busted, jerk."
- self.shell = avatar
- self.logout = logout
- self.workingDirectory = []
- self.state = self.AUTHED
- return reply
-
- def _ebLogin(failure):
- failure.trap(cred_error.UnauthorizedLogin, cred_error.UnhandledCredentials)
- self.state = self.UNAUTH
- raise AuthorizationError
-
- d = self.portal.login(creds, None, IFTPShell)
- d.addCallbacks(_cbLogin, _ebLogin)
- return d
-
-
- def ftp_PASV(self):
- """Request for a passive connection
-
- from the rfc::
-
- This command requests the server-DTP to \"listen\" on a data port
- (which is not its default data port) and to wait for a connection
- rather than initiate one upon receipt of a transfer command. The
- response to this command includes the host and port address this
- server is listening on.
- """
- # if we have a DTP port set up, lose it.
- if self.dtpFactory is not None:
- # cleanupDTP sets dtpFactory to none. Later we'll do
- # cleanup here or something.
- self.cleanupDTP()
- self.dtpFactory = DTPFactory(pi=self)
- self.dtpFactory.setTimeout(self.dtpTimeout)
- self.dtpPort = self.getDTPPort(self.dtpFactory)
-
- host = self.transport.getHost().host
- port = self.dtpPort.getHost().port
- self.reply(ENTERING_PASV_MODE, encodeHostPort(host, port))
- return self.dtpFactory.deferred.addCallback(lambda ign: None)
-
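The 227 reply built above embeds the listening address via encodeHostPort (defined elsewhere in this module) as the classic h1,h2,h3,h4,p1,p2 form; the encoding amounts to:

    # illustrative only: four address octets plus the port split into high and low bytes
    host, port = '192.168.0.5', 21100
    payload = ','.join(host.split('.') + [str(port >> 8), str(port & 0xff)])
    # payload == '192,168,0,5,82,108'   (82 * 256 + 108 == 21100)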
-
- def ftp_PORT(self, address):
- addr = map(int, address.split(','))
- ip = '%d.%d.%d.%d' % tuple(addr[:4])
- port = addr[4] << 8 | addr[5]
-
- # if we have a DTP port set up, lose it.
- if self.dtpFactory is not None:
- self.cleanupDTP()
-
- self.dtpFactory = DTPFactory(pi=self, peerHost=self.transport.getPeer().host)
- self.dtpFactory.setTimeout(self.dtpTimeout)
- self.dtpPort = reactor.connectTCP(ip, port, self.dtpFactory)
-
- def connected(ignored):
- return ENTERING_PORT_MODE
- def connFailed(err):
- err.trap(PortConnectionError)
- return CANT_OPEN_DATA_CNX
- return self.dtpFactory.deferred.addCallbacks(connected, connFailed)
-
-
- def ftp_LIST(self, path=''):
- """ This command causes a list to be sent from the server to the
- passive DTP. If the pathname specifies a directory or other
- group of files, the server should transfer a list of files
- in the specified directory. If the pathname specifies a
- file then the server should send current information on the
- file. A null argument implies the user's current working or
- default directory.
- """
- # For now, simply require an already-established data connection.
- if self.dtpInstance is None or not self.dtpInstance.isConnected:
- return defer.fail(BadCmdSequenceError('must send PORT or PASV before RETR'))
-
- # bug in konqueror
- if path == "-a":
- path = ''
- # bug in gFTP 2.0.15
- if path == "-aL":
- path = ''
- # bug in Nautilus 2.10.0
- if path == "-L":
- path = ''
- # bug in ange-ftp
- if path == "-la":
- path = ''
-
- def gotListing(results):
- self.reply(DATA_CNX_ALREADY_OPEN_START_XFR)
- for (name, attrs) in results:
- self.dtpInstance.sendListResponse(name, attrs)
- self.dtpInstance.transport.loseConnection()
- return (TXFR_COMPLETE_OK,)
-
- try:
- segments = toSegments(self.workingDirectory, path)
- except InvalidPath:
- return defer.fail(FileNotFoundError(path))
-
- d = self.shell.list(
- segments,
- ('size', 'directory', 'permissions', 'hardlinks',
- 'modified', 'owner', 'group'))
- d.addCallback(gotListing)
- return d
-
-
- def ftp_NLST(self, path):
- """
- This command causes a directory listing to be sent from the server to
- the client. The pathname should specify a directory or other
- system-specific file group descriptor. An empty path implies the current
- working directory. If the path is non-existent, send nothing. If the
- path is to a file, send only the file name.
-
- @type path: C{str}
- @param path: The path for which a directory listing should be returned.
-
- @rtype: L{Deferred}
- @return: a L{Deferred} which will be fired when the listing request
- is finished.
- """
- # XXX: why is this check different from ftp_RETR/ftp_STOR? See #4180
- if self.dtpInstance is None or not self.dtpInstance.isConnected:
- return defer.fail(
- BadCmdSequenceError('must send PORT or PASV before RETR'))
-
- try:
- segments = toSegments(self.workingDirectory, path)
- except InvalidPath:
- return defer.fail(FileNotFoundError(path))
-
- def cbList(results):
- """
- Send, line by line, each file in the directory listing, and then
- close the connection.
-
- @type results: A C{list} of C{tuple}. The first element of each
- C{tuple} is a C{str} and the second element is a C{list}.
- @param results: The names of the files in the directory.
-
- @rtype: C{tuple}
- @return: A C{tuple} containing the status code for a successful
- transfer.
- """
- self.reply(DATA_CNX_ALREADY_OPEN_START_XFR)
- for (name, ignored) in results:
- self.dtpInstance.sendLine(name)
- self.dtpInstance.transport.loseConnection()
- return (TXFR_COMPLETE_OK,)
-
- def cbGlob(results):
- self.reply(DATA_CNX_ALREADY_OPEN_START_XFR)
- for (name, ignored) in results:
- if fnmatch.fnmatch(name, segments[-1]):
- self.dtpInstance.sendLine(name)
- self.dtpInstance.transport.loseConnection()
- return (TXFR_COMPLETE_OK,)
-
- def listErr(results):
- """
- RFC 959 specifies that an NLST request may only return directory
- listings. Thus, send nothing and just close the connection.
-
- @type results: L{Failure}
- @param results: The L{Failure} wrapping a L{FileNotFoundError} that
- occurred while trying to list the contents of a nonexistent
- directory.
-
- @rtype: C{tuple}
- @returns: A C{tuple} containing the status code for a successful
- transfer.
- """
- self.dtpInstance.transport.loseConnection()
- return (TXFR_COMPLETE_OK,)
-
- # XXX This globbing may be incomplete: see #4181
- if segments and (
- '*' in segments[-1] or '?' in segments[-1] or
- ('[' in segments[-1] and ']' in segments[-1])):
- d = self.shell.list(segments[:-1])
- d.addCallback(cbGlob)
- else:
- d = self.shell.list(segments)
- d.addCallback(cbList)
- # self.shell.list will generate an error if the path is invalid
- d.addErrback(listErr)
- return d
-
-
- def ftp_CWD(self, path):
- try:
- segments = toSegments(self.workingDirectory, path)
- except InvalidPath:
- # XXX Eh, what to fail with here?
- return defer.fail(FileNotFoundError(path))
-
- def accessGranted(result):
- self.workingDirectory = segments
- return (REQ_FILE_ACTN_COMPLETED_OK,)
-
- return self.shell.access(segments).addCallback(accessGranted)
-
-
- def ftp_CDUP(self):
- return self.ftp_CWD('..')
-
-
- def ftp_PWD(self):
- return (PWD_REPLY, '/' + '/'.join(self.workingDirectory))
-
-
- def ftp_RETR(self, path):
- """
- This command causes the content of a file to be sent over the data
- transfer channel. If the path is to a folder, an error will be raised.
-
- @type path: C{str}
- @param path: The path to the file which should be transferred over the
- data transfer channel.
-
- @rtype: L{Deferred}
- @return: a L{Deferred} which will be fired when the transfer is done.
- """
- if self.dtpInstance is None:
- raise BadCmdSequenceError('PORT or PASV required before RETR')
-
- try:
- newsegs = toSegments(self.workingDirectory, path)
- except InvalidPath:
- return defer.fail(FileNotFoundError(path))
-
- # XXX For now, just disable the timeout. Later we'll want to
- # leave it active and have the DTP connection reset it
- # periodically.
- self.setTimeout(None)
-
- # Put it back later
- def enableTimeout(result):
- self.setTimeout(self.factory.timeOut)
- return result
-
- # And away she goes
- if not self.binary:
- cons = ASCIIConsumerWrapper(self.dtpInstance)
- else:
- cons = self.dtpInstance
-
- def cbSent(result):
- return (TXFR_COMPLETE_OK,)
-
- def ebSent(err):
- log.msg("Unexpected error attempting to transmit file to client:")
- log.err(err)
- if err.check(FTPCmdError):
- return err
- return (CNX_CLOSED_TXFR_ABORTED,)
-
- def cbOpened(file):
- # Tell them what to doooo
- if self.dtpInstance.isConnected:
- self.reply(DATA_CNX_ALREADY_OPEN_START_XFR)
- else:
- self.reply(FILE_STATUS_OK_OPEN_DATA_CNX)
-
- d = file.send(cons)
- d.addCallbacks(cbSent, ebSent)
- return d
-
- def ebOpened(err):
- if not err.check(PermissionDeniedError, FileNotFoundError, IsADirectoryError):
- log.msg("Unexpected error attempting to open file for transmission:")
- log.err(err)
- if err.check(FTPCmdError):
- return (err.value.errorCode, '/'.join(newsegs))
- return (FILE_NOT_FOUND, '/'.join(newsegs))
-
- d = self.shell.openForReading(newsegs)
- d.addCallbacks(cbOpened, ebOpened)
- d.addBoth(enableTimeout)
-
- # Pass back Deferred that fires when the transfer is done
- return d
-
-
- def ftp_STOR(self, path):
- if self.dtpInstance is None:
- raise BadCmdSequenceError('PORT or PASV required before STOR')
-
- try:
- newsegs = toSegments(self.workingDirectory, path)
- except InvalidPath:
- return defer.fail(FileNotFoundError(path))
-
- # XXX For now, just disable the timeout. Later we'll want to
- # leave it active and have the DTP connection reset it
- # periodically.
- self.setTimeout(None)
-
- # Put it back later
- def enableTimeout(result):
- self.setTimeout(self.factory.timeOut)
- return result
-
- def cbSent(result):
- return (TXFR_COMPLETE_OK,)
-
- def ebSent(err):
- log.msg("Unexpected error receiving file from client:")
- log.err(err)
- if err.check(FTPCmdError):
- return err
- return (CNX_CLOSED_TXFR_ABORTED,)
-
- def cbConsumer(cons):
- if not self.binary:
- cons = ASCIIConsumerWrapper(cons)
-
- d = self.dtpInstance.registerConsumer(cons)
-
- # Tell them what to doooo
- if self.dtpInstance.isConnected:
- self.reply(DATA_CNX_ALREADY_OPEN_START_XFR)
- else:
- self.reply(FILE_STATUS_OK_OPEN_DATA_CNX)
-
- return d
-
- def cbOpened(file):
- d = file.receive()
- d.addCallback(cbConsumer)
- d.addCallback(lambda ignored: file.close())
- d.addCallbacks(cbSent, ebSent)
- return d
-
- def ebOpened(err):
- if not err.check(PermissionDeniedError, FileNotFoundError, IsNotADirectoryError):
- log.msg("Unexpected error attempting to open file for upload:")
- log.err(err)
- if isinstance(err.value, FTPCmdError):
- return (err.value.errorCode, '/'.join(newsegs))
- return (FILE_NOT_FOUND, '/'.join(newsegs))
-
- d = self.shell.openForWriting(newsegs)
- d.addCallbacks(cbOpened, ebOpened)
- d.addBoth(enableTimeout)
-
- # Pass back Deferred that fires when the transfer is done
- return d
-
-
- def ftp_SIZE(self, path):
- try:
- newsegs = toSegments(self.workingDirectory, path)
- except InvalidPath:
- return defer.fail(FileNotFoundError(path))
-
- def cbStat((size,)):
- return (FILE_STATUS, str(size))
-
- return self.shell.stat(newsegs, ('size',)).addCallback(cbStat)
-
-
- def ftp_MDTM(self, path):
- try:
- newsegs = toSegments(self.workingDirectory, path)
- except InvalidPath:
- return defer.fail(FileNotFoundError(path))
-
- def cbStat((modified,)):
- return (FILE_STATUS, time.strftime('%Y%m%d%H%M%S', time.gmtime(modified)))
-
- return self.shell.stat(newsegs, ('modified',)).addCallback(cbStat)
-
-
- def ftp_TYPE(self, type):
- p = type.upper()
- if p:
- f = getattr(self, 'type_' + p[0], None)
- if f is not None:
- return f(p[1:])
- return self.type_UNKNOWN(p)
- return (SYNTAX_ERR,)
-
- def type_A(self, code):
- if code == '' or code == 'N':
- self.binary = False
- return (TYPE_SET_OK, 'A' + code)
- else:
- return defer.fail(CmdArgSyntaxError(code))
-
- def type_I(self, code):
- if code == '':
- self.binary = True
- return (TYPE_SET_OK, 'I')
- else:
- return defer.fail(CmdArgSyntaxError(code))
-
- def type_UNKNOWN(self, code):
- return defer.fail(CmdNotImplementedForArgError(code))
-
-
-
- def ftp_SYST(self):
- return NAME_SYS_TYPE
-
-
- def ftp_STRU(self, structure):
- p = structure.upper()
- if p == 'F':
- return (CMD_OK,)
- return defer.fail(CmdNotImplementedForArgError(structure))
-
-
- def ftp_MODE(self, mode):
- p = mode.upper()
- if p == 'S':
- return (CMD_OK,)
- return defer.fail(CmdNotImplementedForArgError(mode))
-
-
- def ftp_MKD(self, path):
- try:
- newsegs = toSegments(self.workingDirectory, path)
- except InvalidPath:
- return defer.fail(FileNotFoundError(path))
- return self.shell.makeDirectory(newsegs).addCallback(lambda ign: (MKD_REPLY, path))
-
-
- def ftp_RMD(self, path):
- try:
- newsegs = toSegments(self.workingDirectory, path)
- except InvalidPath:
- return defer.fail(FileNotFoundError(path))
- return self.shell.removeDirectory(newsegs).addCallback(lambda ign: (REQ_FILE_ACTN_COMPLETED_OK,))
-
-
- def ftp_DELE(self, path):
- try:
- newsegs = toSegments(self.workingDirectory, path)
- except InvalidPath:
- return defer.fail(FileNotFoundError(path))
- return self.shell.removeFile(newsegs).addCallback(lambda ign: (REQ_FILE_ACTN_COMPLETED_OK,))
-
-
- def ftp_NOOP(self):
- return (CMD_OK,)
-
-
- def ftp_RNFR(self, fromName):
- self._fromName = fromName
- self.state = self.RENAMING
- return (REQ_FILE_ACTN_PENDING_FURTHER_INFO,)
-
-
- def ftp_RNTO(self, toName):
- fromName = self._fromName
- del self._fromName
- self.state = self.AUTHED
-
- try:
- fromsegs = toSegments(self.workingDirectory, fromName)
- tosegs = toSegments(self.workingDirectory, toName)
- except InvalidPath:
- return defer.fail(FileNotFoundError(fromName))
- return self.shell.rename(fromsegs, tosegs).addCallback(lambda ign: (REQ_FILE_ACTN_COMPLETED_OK,))
-
-
- def ftp_QUIT(self):
- self.reply(GOODBYE_MSG)
- self.transport.loseConnection()
- self.disconnected = True
-
-
- def cleanupDTP(self):
- """call when DTP connection exits
- """
- log.msg('cleanupDTP', debug=True)
-
- log.msg(self.dtpPort)
- dtpPort, self.dtpPort = self.dtpPort, None
- if interfaces.IListeningPort.providedBy(dtpPort):
- dtpPort.stopListening()
- elif interfaces.IConnector.providedBy(dtpPort):
- dtpPort.disconnect()
- else:
- assert False, "dtpPort should be an IListeningPort or IConnector, instead is %r" % (dtpPort,)
-
- self.dtpFactory.stopFactory()
- self.dtpFactory = None
-
- if self.dtpInstance is not None:
- self.dtpInstance = None
-
-
-class FTPFactory(policies.LimitTotalConnectionsFactory):
- """
- A factory for producing ftp protocol instances
-
- @ivar timeOut: the protocol interpreter's idle timeout time in seconds,
- default is 600 seconds.
-
- @ivar passivePortRange: value forwarded to C{protocol.passivePortRange}.
- @type passivePortRange: C{iterator}
- """
- protocol = FTP
- overflowProtocol = FTPOverflowProtocol
- allowAnonymous = True
- userAnonymous = 'anonymous'
- timeOut = 600
-
- welcomeMessage = "Twisted %s FTP Server" % (copyright.version,)
-
- passivePortRange = xrange(0, 1)
-
- def __init__(self, portal=None, userAnonymous='anonymous'):
- self.portal = portal
- self.userAnonymous = userAnonymous
- self.instances = []
-
- def buildProtocol(self, addr):
- p = policies.LimitTotalConnectionsFactory.buildProtocol(self, addr)
- if p is not None:
- p.wrappedProtocol.portal = self.portal
- p.wrappedProtocol.timeOut = self.timeOut
- p.wrappedProtocol.passivePortRange = self.passivePortRange
- return p
-
- def stopFactory(self):
- # make sure ftp instance's timeouts are set to None
- # to avoid reactor complaints
- [p.setTimeout(None) for p in self.instances if p.timeOut is not None]
- policies.LimitTotalConnectionsFactory.stopFactory(self)
-
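A hedged sketch of wiring the factory to a cred portal (FTPRealm appears further down in this module; the listening port and root path are placeholders):

    from twisted.cred.portal import Portal
    from twisted.cred.checkers import AllowAnonymousAccess
    from twisted.internet import reactor
    from twisted.protocols.ftp import FTPFactory, FTPRealm

    p = Portal(FTPRealm('/srv/ftp'), [AllowAnonymousAccess()])
    reactor.listenTCP(2121, FTPFactory(portal=p))
    reactor.run()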
-# -- Cred Objects --
-
-
-class IFTPShell(Interface):
- """
- An abstraction of the shell commands used by the FTP protocol for
- a given user account.
-
- All path names must be absolute.
- """
-
- def makeDirectory(path):
- """
- Create a directory.
-
- @param path: The path, as a list of segments, to create
- @type path: C{list} of C{unicode}
-
- @return: A Deferred which fires when the directory has been
- created, or which fails if the directory cannot be created.
- """
-
-
- def removeDirectory(path):
- """
- Remove a directory.
-
- @param path: The path, as a list of segments, to remove
- @type path: C{list} of C{unicode}
-
- @return: A Deferred which fires when the directory has been
- removed, or which fails if the directory cannot be removed.
- """
-
-
- def removeFile(path):
- """
- Remove a file.
-
- @param path: The path, as a list of segments, to remove
- @type path: C{list} of C{unicode}
-
- @return: A Deferred which fires when the file has been
- removed, or which fails if the file cannot be removed.
- """
-
-
- def rename(fromPath, toPath):
- """
- Rename a file or directory.
-
- @param fromPath: The current name of the path.
- @type fromPath: C{list} of C{unicode}
-
- @param toPath: The desired new name of the path.
- @type toPath: C{list} of C{unicode}
-
- @return: A Deferred which fires when the path has been
- renamed, or which fails if the path cannot be renamed.
- """
-
-
- def access(path):
- """
- Determine whether access to the given path is allowed.
-
- @param path: The path, as a list of segments
-
- @return: A Deferred which fires with None if access is allowed
- or which fails with a specific exception type if access is
- denied.
- """
-
-
- def stat(path, keys=()):
- """
- Retrieve information about the given path.
-
- This is like list, except it will never return results about
- child paths.
- """
-
-
- def list(path, keys=()):
- """
- Retrieve information about the given path.
-
- If the path represents a non-directory, the result list should
- have only one entry with information about that non-directory.
- Otherwise, the result list should have an element for each
- child of the directory.
-
- @param path: The path, as a list of segments, to list
- @type path: C{list} of C{unicode}
-
- @param keys: A tuple of keys desired in the resulting
- dictionaries.
-
- @return: A Deferred which fires with a list of (name, list),
- where the name is the name of the entry as a unicode string
- and each list contains values corresponding to the requested
- keys. The following are possible elements of keys, and the
- values which should be returned for them:
-
- - C{'size'}: size in bytes, as an integer (this is kinda required)
-
- - C{'directory'}: boolean indicating the type of this entry
-
- - C{'permissions'}: a bitvector (see os.stat(foo).st_mode)
-
- - C{'hardlinks'}: Number of hard links to this entry
-
- - C{'modified'}: number of seconds since the epoch since entry was
- modified
-
- - C{'owner'}: string indicating the user owner of this entry
-
- - C{'group'}: string indicating the group owner of this entry
- """
-
-
- def openForReading(path):
- """
- @param path: The path, as a list of segments, to open
- @type path: C{list} of C{unicode}
-
- @rtype: C{Deferred} which will fire with L{IReadFile}
- """
-
-
- def openForWriting(path):
- """
- @param path: The path, as a list of segments, to open
- @type path: C{list} of C{unicode}
-
- @rtype: C{Deferred} which will fire with L{IWriteFile}
- """
-
-
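To make the list() contract concrete, a hypothetical result for keys=('size', 'directory') would be a list of (name, values) pairs such as:

    # the Deferred returned by shell.list(path, ('size', 'directory')) might fire with:
    [(u'README',   [1024, False]),
     (u'incoming', [4096, True])]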
-
-class IReadFile(Interface):
- """
- A file out of which bytes may be read.
- """
-
- def send(consumer):
- """
- Produce the contents of the given path to the given consumer. This
- method may only be invoked once on each provider.
-
- @type consumer: C{IConsumer}
-
- @return: A Deferred which fires when the file has been
- consumed completely.
- """
-
-
-
-class IWriteFile(Interface):
- """
- A file into which bytes may be written.
- """
-
- def receive():
- """
- Create a consumer which will write to this file. This method may
- only be invoked once on each provider.
-
- @rtype: C{Deferred} of C{IConsumer}
- """
-
- def close():
- """
- Perform any post-write work that needs to be done. This method may
- only be invoked once on each provider, and will always be invoked
- after receive().
-
- @rtype: C{Deferred} of anything: the value is ignored. The FTP client
- will not see their upload request complete until this Deferred has
- been fired.
- """
-
-def _getgroups(uid):
- """Return the primary and supplementary groups for the given UID.
-
- @type uid: C{int}
- """
- result = []
- pwent = pwd.getpwuid(uid)
-
- result.append(pwent.pw_gid)
-
- for grent in grp.getgrall():
- if pwent.pw_name in grent.gr_mem:
- result.append(grent.gr_gid)
-
- return result
-
-
-def _testPermissions(uid, gid, spath, mode='r'):
- """
- checks to see if uid has proper permissions to access path with mode
-
- @type uid: C{int}
- @param uid: numeric user id
-
- @type gid: C{int}
- @param gid: numeric group id
-
- @type spath: C{str}
- @param spath: the path on the server to test
-
- @type mode: C{str}
- @param mode: 'r' or 'w' (read or write)
-
- @rtype: C{bool}
- @return: True if the given credentials have the specified form of
- access to the given path
- """
- if mode == 'r':
- usr = stat.S_IRUSR
- grp = stat.S_IRGRP
- oth = stat.S_IROTH
- amode = os.R_OK
- elif mode == 'w':
- usr = stat.S_IWUSR
- grp = stat.S_IWGRP
- oth = stat.S_IWOTH
- amode = os.W_OK
- else:
- raise ValueError("Invalid mode %r: must specify 'r' or 'w'" % (mode,))
-
- access = False
- if os.path.exists(spath):
- if uid == 0:
- access = True
- else:
- s = os.stat(spath)
- if usr & s.st_mode and uid == s.st_uid:
- access = True
- elif grp & s.st_mode and gid in _getgroups(uid):
- access = True
- elif oth & s.st_mode:
- access = True
-
- if access:
- if not os.access(spath, amode):
- access = False
- log.msg("Filesystem grants permission to UID %d but it is inaccessible to me running as UID %d" % (
- uid, os.getuid()))
- return access
-
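For illustration, checking read and write access for the current process's credentials (the results naturally depend on the host):

    import os, pwd
    uid = os.getuid()
    gid = pwd.getpwuid(uid).pw_gid
    _testPermissions(uid, gid, '/etc/passwd', 'r')   # typically True
    _testPermissions(uid, gid, '/etc/passwd', 'w')   # typically False for non-root users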
-
-
-class FTPAnonymousShell(object):
- """
- An anonymous implementation of IFTPShell
-
- @type filesystemRoot: L{twisted.python.filepath.FilePath}
- @ivar filesystemRoot: The path which is considered the root of
- this shell.
- """
- implements(IFTPShell)
-
- def __init__(self, filesystemRoot):
- self.filesystemRoot = filesystemRoot
-
-
- def _path(self, path):
- return reduce(filepath.FilePath.child, path, self.filesystemRoot)
-
-
- def makeDirectory(self, path):
- return defer.fail(AnonUserDeniedError())
-
-
- def removeDirectory(self, path):
- return defer.fail(AnonUserDeniedError())
-
-
- def removeFile(self, path):
- return defer.fail(AnonUserDeniedError())
-
-
- def rename(self, fromPath, toPath):
- return defer.fail(AnonUserDeniedError())
-
-
- def receive(self, path):
- path = self._path(path)
- return defer.fail(AnonUserDeniedError())
-
-
- def openForReading(self, path):
- """
- Open C{path} for reading.
-
- @param path: The path, as a list of segments, to open.
- @type path: C{list} of C{unicode}
- @return: A L{Deferred} is returned that will fire with an object
- implementing L{IReadFile} if the file is successfully opened. If
- C{path} is a directory, or if an exception is raised while trying
- to open the file, the L{Deferred} will fire with an error.
- """
- p = self._path(path)
- if p.isdir():
- # Normally, we would only check for EISDIR in open, but win32
- # returns EACCES in this case, so we check before
- return defer.fail(IsADirectoryError(path))
- try:
- f = p.open('r')
- except (IOError, OSError), e:
- return errnoToFailure(e.errno, path)
- except:
- return defer.fail()
- else:
- return defer.succeed(_FileReader(f))
-
-
- def openForWriting(self, path):
- """
- Reject write attempts by anonymous users with
- L{PermissionDeniedError}.
- """
- return defer.fail(PermissionDeniedError("STOR not allowed"))
-
-
- def access(self, path):
- p = self._path(path)
- if not p.exists():
- # Again, win32 doesn't report a sane error after, so let's fail
- # early if we can
- return defer.fail(FileNotFoundError(path))
- # For now, just see if we can os.listdir() it
- try:
- p.listdir()
- except (IOError, OSError), e:
- return errnoToFailure(e.errno, path)
- except:
- return defer.fail()
- else:
- return defer.succeed(None)
-
-
- def stat(self, path, keys=()):
- p = self._path(path)
- if p.isdir():
- try:
- statResult = self._statNode(p, keys)
- except (IOError, OSError), e:
- return errnoToFailure(e.errno, path)
- except:
- return defer.fail()
- else:
- return defer.succeed(statResult)
- else:
- return self.list(path, keys).addCallback(lambda res: res[0][1])
-
-
- def list(self, path, keys=()):
- """
- Return the list of files at the given C{path}, adding the stat
- information requested by C{keys} if specified.
-
- @param path: the directory or file to check.
- @type path: C{str}
-
- @param keys: the list of desired metadata
- @type keys: C{list} of C{str}
- """
- filePath = self._path(path)
- if filePath.isdir():
- entries = filePath.listdir()
- fileEntries = [filePath.child(p) for p in entries]
- elif filePath.isfile():
- entries = [os.path.join(*filePath.segmentsFrom(self.filesystemRoot))]
- fileEntries = [filePath]
- else:
- return defer.fail(FileNotFoundError(path))
-
- results = []
- for fileName, filePath in zip(entries, fileEntries):
- ent = []
- results.append((fileName, ent))
- if keys:
- try:
- ent.extend(self._statNode(filePath, keys))
- except (IOError, OSError), e:
- return errnoToFailure(e.errno, fileName)
- except:
- return defer.fail()
-
- return defer.succeed(results)
-
-
- def _statNode(self, filePath, keys):
- """
- Shortcut method to get stat info on a node.
-
- @param filePath: the node to stat.
- @type filePath: C{filepath.FilePath}
-
- @param keys: the stat keys to get.
- @type keys: C{iterable}
- """
- filePath.restat()
- return [getattr(self, '_stat_' + k)(filePath.statinfo) for k in keys]
-
- _stat_size = operator.attrgetter('st_size')
- _stat_permissions = operator.attrgetter('st_mode')
- _stat_hardlinks = operator.attrgetter('st_nlink')
- _stat_modified = operator.attrgetter('st_mtime')
-
-
- def _stat_owner(self, st):
- if pwd is not None:
- try:
- return pwd.getpwuid(st.st_uid)[0]
- except KeyError:
- pass
- return str(st.st_uid)
-
-
- def _stat_group(self, st):
- if grp is not None:
- try:
- return grp.getgrgid(st.st_gid)[0]
- except KeyError:
- pass
- return str(st.st_gid)
-
-
- def _stat_directory(self, st):
- return bool(st.st_mode & stat.S_IFDIR)
-
-
-
-class _FileReader(object):
- implements(IReadFile)
-
- def __init__(self, fObj):
- self.fObj = fObj
- self._send = False
-
- def _close(self, passthrough):
- self._send = True
- self.fObj.close()
- return passthrough
-
- def send(self, consumer):
- assert not self._send, "Can only call IReadFile.send *once* per instance"
- self._send = True
- d = basic.FileSender().beginFileTransfer(self.fObj, consumer)
- d.addBoth(self._close)
- return d
-
-
-
-class FTPShell(FTPAnonymousShell):
- """
- An authenticated implementation of L{IFTPShell}.
- """
-
- def makeDirectory(self, path):
- p = self._path(path)
- try:
- p.makedirs()
- except (IOError, OSError), e:
- return errnoToFailure(e.errno, path)
- except:
- return defer.fail()
- else:
- return defer.succeed(None)
-
-
- def removeDirectory(self, path):
- p = self._path(path)
- if p.isfile():
- # Win32 returns the wrong errno when rmdir is called on a file
- # instead of a directory, so as we have the info here, let's fail
- # early with a pertinent error
- return defer.fail(IsNotADirectoryError(path))
- try:
- os.rmdir(p.path)
- except (IOError, OSError), e:
- return errnoToFailure(e.errno, path)
- except:
- return defer.fail()
- else:
- return defer.succeed(None)
-
-
- def removeFile(self, path):
- p = self._path(path)
- if p.isdir():
- # Win32 returns the wrong errno when remove is called on a
- # directory instead of a file, so as we have the info here,
- # let's fail early with a pertinent error
- return defer.fail(IsADirectoryError(path))
- try:
- p.remove()
- except (IOError, OSError), e:
- return errnoToFailure(e.errno, path)
- except:
- return defer.fail()
- else:
- return defer.succeed(None)
-
-
- def rename(self, fromPath, toPath):
- fp = self._path(fromPath)
- tp = self._path(toPath)
- try:
- os.rename(fp.path, tp.path)
- except (IOError, OSError), e:
- return errnoToFailure(e.errno, fromPath)
- except:
- return defer.fail()
- else:
- return defer.succeed(None)
-
-
- def openForWriting(self, path):
- """
- Open C{path} for writing.
-
- @param path: The path, as a list of segments, to open.
- @type path: C{list} of C{unicode}
- @return: A L{Deferred} is returned that will fire with an object
- implementing L{IWriteFile} if the file is successfully opened. If
- C{path} is a directory, or if an exception is raised while trying
- to open the file, the L{Deferred} will fire with an error.
- """
- p = self._path(path)
- if p.isdir():
- # Normally, we would only check for EISDIR in open, but win32
- # returns EACCES in this case, so we check before
- return defer.fail(IsADirectoryError(path))
- try:
- fObj = p.open('w')
- except (IOError, OSError), e:
- return errnoToFailure(e.errno, path)
- except:
- return defer.fail()
- return defer.succeed(_FileWriter(fObj))
-
-
-
-class _FileWriter(object):
- implements(IWriteFile)
-
- def __init__(self, fObj):
- self.fObj = fObj
- self._receive = False
-
- def receive(self):
- assert not self._receive, "Can only call IWriteFile.receive *once* per instance"
- self._receive = True
- # FileConsumer will close the file object
- return defer.succeed(FileConsumer(self.fObj))
-
- def close(self):
- return defer.succeed(None)
-
-
-
-class BaseFTPRealm:
- """
- Base class for simple FTP realms which provides an easy hook for specifying
- the home directory for each user.
- """
- implements(portal.IRealm)
-
- def __init__(self, anonymousRoot):
- self.anonymousRoot = filepath.FilePath(anonymousRoot)
-
-
- def getHomeDirectory(self, avatarId):
- """
- Return a L{FilePath} representing the home directory of the given
- avatar. Override this in a subclass.
-
- @param avatarId: A user identifier returned from a credentials checker.
- @type avatarId: C{str}
-
- @rtype: L{FilePath}
- """
- raise NotImplementedError(
- "%r did not override getHomeDirectory" % (self.__class__,))
-
-
- def requestAvatar(self, avatarId, mind, *interfaces):
- for iface in interfaces:
- if iface is IFTPShell:
- if avatarId is checkers.ANONYMOUS:
- avatar = FTPAnonymousShell(self.anonymousRoot)
- else:
- avatar = FTPShell(self.getHomeDirectory(avatarId))
- return (IFTPShell, avatar,
- getattr(avatar, 'logout', lambda: None))
- raise NotImplementedError(
- "Only IFTPShell interface is supported by this realm")
-
-
-
-class FTPRealm(BaseFTPRealm):
- """
- @type anonymousRoot: L{twisted.python.filepath.FilePath}
- @ivar anonymousRoot: Root of the filesystem to which anonymous
- users will be granted access.
-
- @type userHome: L{filepath.FilePath}
- @ivar userHome: Root of the filesystem containing user home directories.
- """
- def __init__(self, anonymousRoot, userHome='/home'):
- BaseFTPRealm.__init__(self, anonymousRoot)
- self.userHome = filepath.FilePath(userHome)
-
-
- def getHomeDirectory(self, avatarId):
- """
- Use C{avatarId} as a single path segment to construct a child of
- C{self.userHome} and return that child.
- """
- return self.userHome.child(avatarId)
-
-
-
-class SystemFTPRealm(BaseFTPRealm):
- """
- L{SystemFTPRealm} uses system user account information to decide what the
- home directory for a particular avatarId is.
-
- This works on POSIX but probably is not reliable on Windows.
- """
- def getHomeDirectory(self, avatarId):
- """
- Return the system-defined home directory of the system user account with
- the name C{avatarId}.
- """
- path = os.path.expanduser('~' + avatarId)
- if path.startswith('~'):
- raise cred_error.UnauthorizedLogin()
- return filepath.FilePath(path)
-
-
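-# Editorial usage sketch, not part of the original module: wire an FTPRealm
-# into a running server. FTPFactory is defined earlier in this file, and the
-# portal/checkers names are the twisted.cred modules imported above; the
-# path and port number are arbitrary example values.
-def _exampleAnonymousServer(rootPath='/srv/ftp', portNumber=2121):
-    ftpPortal = portal.Portal(FTPRealm(rootPath),
-                              [checkers.AllowAnonymousAccess()])
-    reactor.listenTCP(portNumber, FTPFactory(ftpPortal))
-    reactor.run()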
-
-# --- FTP CLIENT -------------------------------------------------------------
-
-####
-# And now for the client...
-
-# Notes:
-# * Reference: http://cr.yp.to/ftp.html
-# * FIXME: Does not support pipelining (which is not supported by all
-# servers anyway). This isn't a functionality limitation, just a
-# small performance issue.
-# * Only has a rudimentary understanding of FTP response codes (although
-# the full response is passed to the caller if they so choose).
-# * Assumes that USER and PASS should always be sent
-# * Always sets TYPE I (binary mode)
-# * Doesn't understand any of the weird, obscure TELNET stuff (\377...)
-# * FIXME: Doesn't share any code with the FTPServer
-
-class ConnectionLost(FTPError):
- pass
-
-class CommandFailed(FTPError):
- pass
-
-class BadResponse(FTPError):
- pass
-
-class UnexpectedResponse(FTPError):
- pass
-
-class UnexpectedData(FTPError):
- pass
-
-class FTPCommand:
- def __init__(self, text=None, public=0):
- self.text = text
- self.deferred = defer.Deferred()
- self.ready = 1
- self.public = public
- self.transferDeferred = None
-
- def fail(self, failure):
- if self.public:
- self.deferred.errback(failure)
-
-
-class ProtocolWrapper(protocol.Protocol):
- def __init__(self, original, deferred):
- self.original = original
- self.deferred = deferred
- def makeConnection(self, transport):
- self.original.makeConnection(transport)
- def dataReceived(self, data):
- self.original.dataReceived(data)
- def connectionLost(self, reason):
- self.original.connectionLost(reason)
- # Signal that transfer has completed
- self.deferred.callback(None)
-
-
-
-class IFinishableConsumer(interfaces.IConsumer):
- """
- A Consumer for producers that finish.
-
- @since: 11.0
- """
-
- def finish():
- """
- The producer has finished producing.
- """
-
-
-
-class SenderProtocol(protocol.Protocol):
- implements(IFinishableConsumer)
-
- def __init__(self):
- # Fired upon connection
- self.connectedDeferred = defer.Deferred()
-
- # Fired upon disconnection
- self.deferred = defer.Deferred()
-
- #Protocol stuff
- def dataReceived(self, data):
- raise UnexpectedData(
- "Received data from the server on a "
- "send-only data-connection"
- )
-
- def makeConnection(self, transport):
- protocol.Protocol.makeConnection(self, transport)
- self.connectedDeferred.callback(self)
-
- def connectionLost(self, reason):
- if reason.check(error.ConnectionDone):
- self.deferred.callback('connection done')
- else:
- self.deferred.errback(reason)
-
- #IFinishableConsumer stuff
- def write(self, data):
- self.transport.write(data)
-
- def registerProducer(self, producer, streaming):
- """
- Register the given producer with our transport.
- """
- self.transport.registerProducer(producer, streaming)
-
- def unregisterProducer(self):
- """
- Unregister the previously registered producer.
- """
- self.transport.unregisterProducer()
-
- def finish(self):
- self.transport.loseConnection()
-
-
-def decodeHostPort(line):
- """Decode an FTP response specifying a host and port.
-
- @return: a 2-tuple of (host, port).
- """
- abcdef = re.sub('[^0-9, ]', '', line)
- parsed = [int(p.strip()) for p in abcdef.split(',')]
- for x in parsed:
- if x < 0 or x > 255:
- raise ValueError("Out of range", line, x)
- a, b, c, d, e, f = parsed
- host = "%s.%s.%s.%s" % (a, b, c, d)
- port = (int(e) << 8) + int(f)
- return host, port
-
-def encodeHostPort(host, port):
- numbers = host.split('.') + [str(port >> 8), str(port % 256)]
- return ','.join(numbers)
-
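-# Worked example (editorial note): the reply to PASV carries six comma
-# separated numbers h1,h2,h3,h4,p1,p2, where the port is p1 * 256 + p2:
-#
-#   decodeHostPort('192,168,1,2,12,34')  ==>  ('192.168.1.2', 3106)
-#   encodeHostPort('192.168.1.2', 3106)  ==>  '192,168,1,2,12,34'
-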
-def _unwrapFirstError(failure):
- failure.trap(defer.FirstError)
- return failure.value.subFailure
-
-class FTPDataPortFactory(protocol.ServerFactory):
- """Factory for data connections that use the PORT command
-
- (i.e. "active" transfers)
- """
- noisy = 0
- def buildProtocol(self, addr):
- # This is a bit hackish -- we already have a Protocol instance,
- # so just return it instead of making a new one
- # FIXME: Reject connections from the wrong address/port
- # (potential security problem)
- self.protocol.factory = self
- self.port.loseConnection()
- return self.protocol
-
-
-class FTPClientBasic(basic.LineReceiver):
- """
- Foundations of an FTP client.
- """
- debug = False
-
- def __init__(self):
- self.actionQueue = []
- self.greeting = None
- self.nextDeferred = defer.Deferred().addCallback(self._cb_greeting)
- self.nextDeferred.addErrback(self.fail)
- self.response = []
- self._failed = 0
-
- def fail(self, error):
- """
- Give an error to any queued deferreds.
- """
- self._fail(error)
-
- def _fail(self, error):
- """
- Errback all queued deferreds.
- """
- if self._failed:
- # We're recursing; bail out here for simplicity
- return error
- self._failed = 1
- if self.nextDeferred:
- try:
- self.nextDeferred.errback(failure.Failure(ConnectionLost('FTP connection lost', error)))
- except defer.AlreadyCalledError:
- pass
- for ftpCommand in self.actionQueue:
- ftpCommand.fail(failure.Failure(ConnectionLost('FTP connection lost', error)))
- return error
-
- def _cb_greeting(self, greeting):
- self.greeting = greeting
-
- def sendLine(self, line):
- """
- (Private) Sends a line, unless line is None.
- """
- if line is None:
- return
- basic.LineReceiver.sendLine(self, line)
-
- def sendNextCommand(self):
- """
- (Private) Processes the next command in the queue.
- """
- ftpCommand = self.popCommandQueue()
- if ftpCommand is None:
- self.nextDeferred = None
- return
- if not ftpCommand.ready:
- self.actionQueue.insert(0, ftpCommand)
- reactor.callLater(1.0, self.sendNextCommand)
- self.nextDeferred = None
- return
-
- # FIXME: this if block doesn't belong in FTPClientBasic, it belongs in
- # FTPClient.
- if ftpCommand.text == 'PORT':
- self.generatePortCommand(ftpCommand)
-
- if self.debug:
- log.msg('<-- %s' % ftpCommand.text)
- self.nextDeferred = ftpCommand.deferred
- self.sendLine(ftpCommand.text)
-
- def queueCommand(self, ftpCommand):
- """
- Add an FTPCommand object to the queue.
-
- If it's the only thing in the queue, and we are connected and we aren't
- waiting for a response to an earlier command, the command will be sent
- immediately.
-
- @param ftpCommand: an L{FTPCommand}
- """
- self.actionQueue.append(ftpCommand)
- if (len(self.actionQueue) == 1 and self.transport is not None and
- self.nextDeferred is None):
- self.sendNextCommand()
-
- def queueStringCommand(self, command, public=1):
- """
- Queues a string to be issued as an FTP command
-
- @param command: string of an FTP command to queue
- @param public: a flag intended for internal use by FTPClient. Don't
- change it unless you know what you're doing.
-
- @return: a L{Deferred} that will be called when the response to the
- command has been received.
- """
- ftpCommand = FTPCommand(command, public)
- self.queueCommand(ftpCommand)
- return ftpCommand.deferred
-
- def popCommandQueue(self):
- """
- Return the front element of the command queue, or None if empty.
- """
- if self.actionQueue:
- return self.actionQueue.pop(0)
- else:
- return None
-
- def queueLogin(self, username, password):
- """
- Login: send the username, send the password.
-
- If the password is C{None}, the PASS command won't be sent. Also, if
- the response to the USER command has a response code of 230 (User logged
- in), then PASS won't be sent either.
- """
- # Prepare the USER command
- deferreds = []
- userDeferred = self.queueStringCommand('USER ' + username, public=0)
- deferreds.append(userDeferred)
-
- # Prepare the PASS command (if a password is given)
- if password is not None:
- passwordCmd = FTPCommand('PASS ' + password, public=0)
- self.queueCommand(passwordCmd)
- deferreds.append(passwordCmd.deferred)
-
- # Avoid sending PASS if the response to USER is 230.
- # (ref: http://cr.yp.to/ftp/user.html#user)
- def cancelPasswordIfNotNeeded(response):
- if response[0].startswith('230'):
- # No password needed!
- self.actionQueue.remove(passwordCmd)
- return response
- userDeferred.addCallback(cancelPasswordIfNotNeeded)
-
- # Error handling.
- for deferred in deferreds:
- # If something goes wrong, call fail
- deferred.addErrback(self.fail)
- # But also swallow the error, so we don't cause spurious errors
- deferred.addErrback(lambda x: None)
-
- def lineReceived(self, line):
- """
- (Private) Parses the response messages from the FTP server.
- """
- # Add this line to the current response
- if self.debug:
- log.msg('--> %s' % line)
- self.response.append(line)
-
- # Bail out if this isn't the last line of a response
- # The last line of response starts with 3 digits followed by a space
- codeIsValid = re.match(r'\d{3} ', line)
- if not codeIsValid:
- return
-
- code = line[0:3]
-
- # Ignore marks
- if code[0] == '1':
- return
-
- # Check that we were expecting a response
- if self.nextDeferred is None:
- self.fail(UnexpectedResponse(self.response))
- return
-
- # Reset the response
- response = self.response
- self.response = []
-
- # Look for a success or error code, and call the appropriate callback
- if code[0] in ('2', '3'):
- # Success
- self.nextDeferred.callback(response)
- elif code[0] in ('4', '5'):
- # Failure
- self.nextDeferred.errback(failure.Failure(CommandFailed(response)))
- else:
- # This shouldn't happen unless something screwed up.
- log.msg('Server sent invalid response code %s' % (code,))
- self.nextDeferred.errback(failure.Failure(BadResponse(response)))
-
- # Run the next command
- self.sendNextCommand()
-
- def connectionLost(self, reason):
- self._fail(reason)
-
-
-
-class _PassiveConnectionFactory(protocol.ClientFactory):
- noisy = False
-
- def __init__(self, protoInstance):
- self.protoInstance = protoInstance
-
- def buildProtocol(self, ignored):
- self.protoInstance.factory = self
- return self.protoInstance
-
- def clientConnectionFailed(self, connector, reason):
- e = FTPError('Connection Failed', reason)
- self.protoInstance.deferred.errback(e)
-
-
-
-class FTPClient(FTPClientBasic):
- """
- L{FTPClient} is a client implementation of the FTP protocol which
- exposes FTP commands as methods which return L{Deferred}s.
-
- Each command method returns a L{Deferred} which is called back when a
- successful response code (2xx or 3xx) is received from the server or
- which is error backed if an error response code (4xx or 5xx) is received
- from the server or if a protocol violation occurs. If an error response
- code is received, the L{Deferred} fires with a L{Failure} wrapping a
- L{CommandFailed} instance. The L{CommandFailed} instance is created
- with a list of the response lines received from the server.
-
- See U{RFC 959<http://www.ietf.org/rfc/rfc959.txt>} for error code
- definitions.
-
- Both active and passive transfers are supported.
-
- @ivar passive: See description in __init__.
- """
- connectFactory = reactor.connectTCP
-
- def __init__(self, username='anonymous',
- password='twisted@twistedmatrix.com',
- passive=1):
- """
- Constructor.
-
- I will login as soon as I receive the welcome message from the server.
-
- @param username: FTP username
- @param password: FTP password
- @param passive: flag that controls if I use active or passive data
- connections. You can also change this after construction by
- assigning to C{self.passive}.
- """
- FTPClientBasic.__init__(self)
- self.queueLogin(username, password)
-
- self.passive = passive
-
- def fail(self, error):
- """
- Disconnect, and also give an error to any queued deferreds.
- """
- self.transport.loseConnection()
- self._fail(error)
-
- def receiveFromConnection(self, commands, protocol):
- """
- Retrieves a file or listing generated by the given commands,
- feeding it to the given protocol.
-
- @param commands: list of strings of FTP commands to execute and then
- receive the results of (e.g. C{LIST}, C{RETR})
- @param protocol: A L{Protocol} B{instance} e.g. an
- L{FTPFileListProtocol}, or something that can be adapted to one.
- Typically this will be an L{IConsumer} implementation.
-
- @return: L{Deferred}.
- """
- protocol = interfaces.IProtocol(protocol)
- wrapper = ProtocolWrapper(protocol, defer.Deferred())
- return self._openDataConnection(commands, wrapper)
-
- def queueLogin(self, username, password):
- """
- Login: send the username, send the password, and
- set retrieval mode to binary
- """
- FTPClientBasic.queueLogin(self, username, password)
- d = self.queueStringCommand('TYPE I', public=0)
- # If something goes wrong, call fail
- d.addErrback(self.fail)
- # But also swallow the error, so we don't cause spurious errors
- d.addErrback(lambda x: None)
-
- def sendToConnection(self, commands):
- """
- Establishes a data connection used to send data to the server while
- the given commands (e.g. C{STOR}) are issued.
-
- @return: A tuple of two L{Deferred}s:
- - L{Deferred} L{IFinishableConsumer}. You must call
- the C{finish} method on the IFinishableConsumer when the file
- is completely transferred.
- - L{Deferred} list of control-connection responses.
- """
- s = SenderProtocol()
- r = self._openDataConnection(commands, s)
- return (s.connectedDeferred, r)
-
- def _openDataConnection(self, commands, protocol):
- """
- This method returns a DeferredList.
- """
- cmds = [FTPCommand(command, public=1) for command in commands]
- cmdsDeferred = defer.DeferredList([cmd.deferred for cmd in cmds],
- fireOnOneErrback=True, consumeErrors=True)
- cmdsDeferred.addErrback(_unwrapFirstError)
-
- if self.passive:
- # Hack: use a mutable object to sneak a variable out of the
- # scope of doPassive
- _mutable = [None]
- def doPassive(response):
- """Connect to the port specified in the response to PASV"""
- host, port = decodeHostPort(response[-1][4:])
-
- f = _PassiveConnectionFactory(protocol)
- _mutable[0] = self.connectFactory(host, port, f)
-
- pasvCmd = FTPCommand('PASV')
- self.queueCommand(pasvCmd)
- pasvCmd.deferred.addCallback(doPassive).addErrback(self.fail)
-
- results = [cmdsDeferred, pasvCmd.deferred, protocol.deferred]
- d = defer.DeferredList(results, fireOnOneErrback=True, consumeErrors=True)
- d.addErrback(_unwrapFirstError)
-
- # Ensure the connection is always closed
- def close(x, m=_mutable):
- m[0] and m[0].disconnect()
- return x
- d.addBoth(close)
-
- else:
- # We just place a marker command in the queue, and will fill in
- # the host and port numbers later (see generatePortCommand)
- portCmd = FTPCommand('PORT')
-
- # Ok, now we jump through a few hoops here.
- # This is the problem: a transfer is not to be trusted as complete
- # until we get both the "226 Transfer complete" message on the
- # control connection, and the data socket is closed. Thus, we use
- # a DeferredList to make sure we only fire the callback at the
- # right time.
-
- portCmd.transferDeferred = protocol.deferred
- portCmd.protocol = protocol
- portCmd.deferred.addErrback(portCmd.transferDeferred.errback)
- self.queueCommand(portCmd)
-
- # Create dummy functions for the next callback to call.
- # These will also be replaced with real functions in
- # generatePortCommand.
- portCmd.loseConnection = lambda result: result
- portCmd.fail = lambda error: error
-
- # Ensure that the connection always gets closed
- cmdsDeferred.addErrback(lambda e, pc=portCmd: pc.fail(e) or e)
-
- results = [cmdsDeferred, portCmd.deferred, portCmd.transferDeferred]
- d = defer.DeferredList(results, fireOnOneErrback=True, consumeErrors=True)
- d.addErrback(_unwrapFirstError)
-
- for cmd in cmds:
- self.queueCommand(cmd)
- return d
-
- def generatePortCommand(self, portCmd):
- """
- (Private) Generates the text of a given PORT command.
- """
-
- # The problem is that we don't create the listening port until we need
- # it for various reasons, and so we have to muck about to figure out
- # what interface and port it's listening on, and then finally we can
- # create the text of the PORT command to send to the FTP server.
-
- # FIXME: This method is far too ugly.
-
- # FIXME: The best solution is probably to only create the data port
- # once per FTPClient, and just recycle it for each new download.
- # This should be ok, because we don't pipeline commands.
-
- # Start listening on a port
- factory = FTPDataPortFactory()
- factory.protocol = portCmd.protocol
- listener = reactor.listenTCP(0, factory)
- factory.port = listener
-
- # Ensure we close the listening port if something goes wrong
- def listenerFail(error, listener=listener):
- if listener.connected:
- listener.loseConnection()
- return error
- portCmd.fail = listenerFail
-
- # Construct crufty FTP magic numbers that represent host & port
- host = self.transport.getHost().host
- port = listener.getHost().port
- portCmd.text = 'PORT ' + encodeHostPort(host, port)
-
- def escapePath(self, path):
- """
- Returns an FTP escaped path (replace newlines with nulls).
- """
- # Escape newline characters
- return path.replace('\n', '\0')
-
- def retrieveFile(self, path, protocol, offset=0):
- """
- Retrieve a file from the given path
-
- This method issues the 'RETR' FTP command.
-
- The file is fed into the given Protocol instance. The data connection
- will be passive if self.passive is set.
-
- @param path: path to file that you wish to receive.
- @param protocol: a L{Protocol} instance.
- @param offset: offset to start downloading from
-
- @return: L{Deferred}
- """
- cmds = ['RETR ' + self.escapePath(path)]
- if offset:
- cmds.insert(0, ('REST ' + str(offset)))
- return self.receiveFromConnection(cmds, protocol)
-
- retr = retrieveFile
-
- def storeFile(self, path, offset=0):
- """
- Store a file at the given path.
-
- This method issues the 'STOR' FTP command.
-
- @return: A tuple of two L{Deferred}s:
- - L{Deferred} L{IFinishableConsumer}. You must call
- the C{finish} method on the IFinishableConsumer when the file
- is completely transferred.
- - L{Deferred} list of control-connection responses.
- """
- cmds = ['STOR ' + self.escapePath(path)]
- if offset:
- cmds.insert(0, ('REST ' + str(offset)))
- return self.sendToConnection(cmds)
-
- stor = storeFile
-
-
- def rename(self, pathFrom, pathTo):
- """
- Rename a file.
-
- This method issues the I{RNFR}/I{RNTO} command sequence to rename
- C{pathFrom} to C{pathTo}.
-
- @param pathFrom: the absolute path to the file to be renamed
- @type pathFrom: C{str}
-
- @param pathTo: the absolute path to rename the file to.
- @type pathTo: C{str}
-
- @return: A L{Deferred} which fires when the rename operation has
- succeeded or failed. If it succeeds, the L{Deferred} is called
- back with a two-tuple of lists. The first list contains the
- responses to the I{RNFR} command. The second list contains the
- responses to the I{RNTO} command. If either I{RNFR} or I{RNTO}
- fails, the L{Deferred} is errbacked with L{CommandFailed} or
- L{BadResponse}.
- @rtype: L{Deferred}
-
- @since: 8.2
- """
- renameFrom = self.queueStringCommand('RNFR ' + self.escapePath(pathFrom))
- renameTo = self.queueStringCommand('RNTO ' + self.escapePath(pathTo))
-
- fromResponse = []
-
- # Use a separate Deferred for the ultimate result so that Deferred
- # chaining can't interfere with its result.
- result = defer.Deferred()
- # Bundle up all the responses
- result.addCallback(lambda toResponse: (fromResponse, toResponse))
-
- def ebFrom(failure):
- # Make sure the RNTO doesn't run if the RNFR failed.
- self.popCommandQueue()
- result.errback(failure)
-
- # Save the RNFR response to pass to the result Deferred later
- renameFrom.addCallbacks(fromResponse.extend, ebFrom)
-
- # Hook up the RNTO to the result Deferred as well
- renameTo.chainDeferred(result)
-
- return result
-
-
- def list(self, path, protocol):
- """
- Retrieve a file listing into the given protocol instance.
-
- This method issues the 'LIST' FTP command.
-
- @param path: path to get a file listing for.
- @param protocol: a L{Protocol} instance, probably a
- L{FTPFileListProtocol} instance. It can cope with most common file
- listing formats.
-
- @return: L{Deferred}
- """
- if path is None:
- path = ''
- return self.receiveFromConnection(['LIST ' + self.escapePath(path)], protocol)
-
-
- def nlst(self, path, protocol):
- """
- Retrieve a short file listing into the given protocol instance.
-
- This method issues the 'NLST' FTP command.
-
- NLST (should) return a list of filenames, one per line.
-
- @param path: path to get short file listing for.
- @param protocol: a L{Protocol} instance.
- """
- if path is None:
- path = ''
- return self.receiveFromConnection(['NLST ' + self.escapePath(path)], protocol)
-
-
- def cwd(self, path):
- """
- Issues the CWD (Change Working Directory) command. It's also
- available as changeDirectory, which parses the result.
-
- @return: a L{Deferred} that will be called when done.
- """
- return self.queueStringCommand('CWD ' + self.escapePath(path))
-
-
- def changeDirectory(self, path):
- """
- Change the directory on the server and parse the result to determine
- if it was successful or not.
-
- @type path: C{str}
- @param path: The path to which to change.
-
- @return: a L{Deferred} which will be called back when the directory
- change has succeeded or errbacked if an error occurs.
- """
- warnings.warn(
- "FTPClient.changeDirectory is deprecated in Twisted 8.2 and "
- "newer. Use FTPClient.cwd instead.",
- category=DeprecationWarning,
- stacklevel=2)
-
- def cbResult(result):
- if result[-1][:3] != '250':
- return failure.Failure(CommandFailed(result))
- return True
- return self.cwd(path).addCallback(cbResult)
-
-
- def makeDirectory(self, path):
- """
- Make a directory
-
- This method issues the MKD command.
-
- @param path: The path to the directory to create.
- @type path: C{str}
-
- @return: A L{Deferred} which fires when the server responds. If the
- directory is created, the L{Deferred} is called back with the
- server response. If the server response indicates the directory
- was not created, the L{Deferred} is errbacked with a L{Failure}
- wrapping L{CommandFailed} or L{BadResponse}.
- @rtype: L{Deferred}
-
- @since: 8.2
- """
- return self.queueStringCommand('MKD ' + self.escapePath(path))
-
-
- def removeFile(self, path):
- """
- Delete a file on the server.
-
- L{removeFile} issues a I{DELE} command to the server to remove the
- indicated file. Note that this command cannot remove a directory.
-
- @param path: The path to the file to delete. May be relative to the
- current dir.
- @type path: C{str}
-
- @return: A L{Deferred} which fires when the server responds. On error,
- it is errbacked with either L{CommandFailed} or L{BadResponse}. On
- success, it is called back with a list of response lines.
- @rtype: L{Deferred}
-
- @since: 8.2
- """
- return self.queueStringCommand('DELE ' + self.escapePath(path))
-
-
- def removeDirectory(self, path):
- """
- Delete a directory on the server.
-
- L{removeDirectory} issues a I{RMD} command to the server to remove the
- indicated directory. Described in RFC959.
-
- @param path: The path to the directory to delete. May be relative to
- the current working directory.
- @type path: C{str}
-
- @return: A L{Deferred} which fires when the server responds. On error,
- it is errbacked with either L{CommandFailed} or L{BadResponse}. On
- success, it is called back with a list of response lines.
- @rtype: L{Deferred}
-
- @since: 11.1
- """
- return self.queueStringCommand('RMD ' + self.escapePath(path))
-
-
- def cdup(self):
- """
- Issues the CDUP (Change Directory UP) command.
-
- @return: a L{Deferred} that will be called when done.
- """
- return self.queueStringCommand('CDUP')
-
-
- def pwd(self):
- """
- Issues the PWD (Print Working Directory) command.
-
- L{getDirectory} does the same job but automatically parses the
- result.
-
- @return: a L{Deferred} that will be called when done. It is up to the
- caller to interpret the response, but the L{parsePWDResponse} method
- in this module should work.
- """
- return self.queueStringCommand('PWD')
-
-
- def getDirectory(self):
- """
- Returns the current remote directory.
-
- @return: a L{Deferred} that will be called back with a C{str} giving
- the remote directory or which will errback with L{CommandFailed}
- if an error response is returned.
- """
- def cbParse(result):
- try:
- # The only valid code is 257
- if int(result[0].split(' ', 1)[0]) != 257:
- raise ValueError
- except (IndexError, ValueError):
- return failure.Failure(CommandFailed(result))
- path = parsePWDResponse(result[0])
- if path is None:
- return failure.Failure(CommandFailed(result))
- return path
- return self.pwd().addCallback(cbParse)
-
-
- def quit(self):
- """
- Issues the I{QUIT} command.
-
- @return: A L{Deferred} that fires when the server acknowledges the
- I{QUIT} command. The transport should not be disconnected until
- this L{Deferred} fires.
- """
- return self.queueStringCommand('QUIT')
-
-
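-# Editorial usage sketch, not part of the original module: connect an
-# FTPClient with ClientCreator (from twisted.internet.protocol, imported
-# above as "protocol") and download a remote file into a local one. The
-# host, credentials and file names are arbitrary example values.
-class _FileReceiver(protocol.Protocol):
-    """Write everything received on the data connection to a local file."""
-    def __init__(self, localPath):
-        self._fObj = open(localPath, 'wb')
-
-    def dataReceived(self, data):
-        self._fObj.write(data)
-
-    def connectionLost(self, reason):
-        self._fObj.close()
-
-
-def _exampleDownload():
-    creator = protocol.ClientCreator(
-        reactor, FTPClient, 'anonymous', 'guest@example.com')
-    d = creator.connectTCP('ftp.example.com', 21)
-    def cbConnected(client):
-        return client.retrieveFile('pub/README', _FileReceiver('README.local'))
-    return d.addCallback(cbConnected)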
-
-class FTPFileListProtocol(basic.LineReceiver):
- """Parser for standard FTP file listings
-
- This is the evil required to match::
-
- -rw-r--r-- 1 root other 531 Jan 29 03:26 README
-
- If you need different evil for a wacky FTP server, you can
- override either C{fileLinePattern} or C{parseDirectoryLine()}.
-
- It populates the instance attribute self.files, which is a list containing
- dicts with the following keys (examples from the above line):
- - filetype: e.g. 'd' for directories, or '-' for an ordinary file
- - perms: e.g. 'rw-r--r--'
- - nlinks: e.g. 1
- - owner: e.g. 'root'
- - group: e.g. 'other'
- - size: e.g. 531
- - date: e.g. 'Jan 29 03:26'
- - filename: e.g. 'README'
- - linktarget: e.g. 'some/file'
-
- Note that the 'date' value will be formatted differently depending on the
- date. Check U{http://cr.yp.to/ftp.html} if you really want to try to parse
- it.
-
- @ivar files: list of dicts describing the files in this listing
- """
- fileLinePattern = re.compile(
- r'^(?P<filetype>.)(?P<perms>.{9})\s+(?P<nlinks>\d*)\s*'
- r'(?P<owner>\S+)\s+(?P<group>\S+)\s+(?P<size>\d+)\s+'
- r'(?P<date>...\s+\d+\s+[\d:]+)\s+(?P<filename>([^ ]|\\ )*?)'
- r'( -> (?P<linktarget>[^\r]*))?\r?$'
- )
- delimiter = '\n'
-
- def __init__(self):
- self.files = []
-
- def lineReceived(self, line):
- d = self.parseDirectoryLine(line)
- if d is None:
- self.unknownLine(line)
- else:
- self.addFile(d)
-
- def parseDirectoryLine(self, line):
- """Return a dictionary of fields, or None if line cannot be parsed.
-
- @param line: line of text expected to contain a directory entry
- @type line: str
-
- @return: dict
- """
- match = self.fileLinePattern.match(line)
- if match is None:
- return None
- else:
- d = match.groupdict()
- d['filename'] = d['filename'].replace(r'\ ', ' ')
- d['nlinks'] = int(d['nlinks'])
- d['size'] = int(d['size'])
- if d['linktarget']:
- d['linktarget'] = d['linktarget'].replace(r'\ ', ' ')
- return d
-
- def addFile(self, info):
- """Append file information dictionary to the list of known files.
-
- Subclasses can override or extend this method to handle file
- information differently without affecting the parsing of data
- from the server.
-
- @param info: dictionary containing the parsed representation
- of the file information
- @type info: dict
- """
- self.files.append(info)
-
- def unknownLine(self, line):
- """Deal with received lines which could not be parsed as file
- information.
-
- Subclasses can override this to perform any special processing
- needed.
-
- @param line: unparsable line as received
- @type line: str
- """
- pass
-
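-# Editorial usage sketch, not part of the original module: feed a LIST
-# transfer into FTPFileListProtocol and print the parsed entries once the
-# Deferred returned by FTPClient.list fires. The path is an arbitrary
-# example value.
-def _examplePrintListing(client, path='pub'):
-    fileList = FTPFileListProtocol()
-    d = client.list(path, fileList)
-    def cbDone(ignored):
-        for entry in fileList.files:
-            print '%(filetype)s %(size)8d %(filename)s' % entry
-    return d.addCallback(cbDone)
-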
-def parsePWDResponse(response):
- """Returns the path from a response to a PWD command.
-
- Responses typically look like::
-
- 257 "/home/andrew" is current directory.
-
- For this example, I will return C{'/home/andrew'}.
-
- If I can't find the path, I return C{None}.
- """
- match = re.search('"(.*)"', response)
- if match:
- return match.groups()[0]
- else:
- return None
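-
-
-# Editorial usage sketch, not part of the original module: upload a string
-# through FTPClient.storeFile. The first Deferred fires with the
-# IFinishableConsumer; write to it and call finish(), then wait on the
-# control-connection responses. The file name and payload are arbitrary
-# example values.
-def _exampleUpload(client, data='hello, world\n'):
-    dConsumer, dResponses = client.storeFile('pub/incoming/hello.txt')
-    def cbStore(consumer):
-        consumer.write(data)
-        consumer.finish()
-    dConsumer.addCallback(cbStore)
-    return dResponses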
diff --git a/lib/python2.7/site-packages/Twisted-12.2.0-py2.7-linux-x86_64.egg/twisted/protocols/gps/__init__.py b/lib/python2.7/site-packages/Twisted-12.2.0-py2.7-linux-x86_64.egg/twisted/protocols/gps/__init__.py
deleted file mode 100755
index 278648c3..00000000
--- a/lib/python2.7/site-packages/Twisted-12.2.0-py2.7-linux-x86_64.egg/twisted/protocols/gps/__init__.py
+++ /dev/null
@@ -1 +0,0 @@
-"""Global Positioning System protocols."""
diff --git a/lib/python2.7/site-packages/Twisted-12.2.0-py2.7-linux-x86_64.egg/twisted/protocols/gps/nmea.py b/lib/python2.7/site-packages/Twisted-12.2.0-py2.7-linux-x86_64.egg/twisted/protocols/gps/nmea.py
deleted file mode 100755
index 71d37ea6..00000000
--- a/lib/python2.7/site-packages/Twisted-12.2.0-py2.7-linux-x86_64.egg/twisted/protocols/gps/nmea.py
+++ /dev/null
@@ -1,209 +0,0 @@
-# -*- test-case-name: twisted.test.test_nmea -*-
-# Copyright (c) Twisted Matrix Laboratories.
-# See LICENSE for details.
-
-"""NMEA 0183 implementation
-
-Maintainer: Bob Ippolito
-
-The following NMEA 0183 sentences are currently understood::
- GPGGA (fix)
- GPGLL (position)
- GPRMC (position and time)
- GPGSA (active satellites)
-
-The following NMEA 0183 sentences require implementation::
- None really, the others aren't generally useful or implemented in most devices anyhow
-
-Other desired features::
- - A NMEA 0183 producer to emulate GPS devices (?)
-"""
-
-import operator
-from twisted.protocols import basic
-from twisted.python.compat import reduce
-
-POSFIX_INVALID, POSFIX_SPS, POSFIX_DGPS, POSFIX_PPS = 0, 1, 2, 3
-MODE_AUTO, MODE_FORCED = 'A', 'M'
-MODE_NOFIX, MODE_2D, MODE_3D = 1, 2, 3
-
-class InvalidSentence(Exception):
- pass
-
-class InvalidChecksum(Exception):
- pass
-
-class NMEAReceiver(basic.LineReceiver):
- """This parses most common NMEA-0183 messages, presumably from a serial GPS device at 4800 bps
- """
- delimiter = '\r\n'
- dispatch = {
- 'GPGGA': 'fix',
- 'GPGLL': 'position',
- 'GPGSA': 'activesatellites',
- 'GPRMC': 'positiontime',
- 'GPGSV': 'viewsatellites', # not implemented
- 'GPVTG': 'course', # not implemented
- 'GPALM': 'almanac', # not implemented
- 'GPGRS': 'range', # not implemented
- 'GPGST': 'noise', # not implemented
- 'GPMSS': 'beacon', # not implemented
- 'GPZDA': 'time', # not implemented
- }
- # generally you may miss the beginning of the first message
- ignore_invalid_sentence = 1
- # checksums shouldn't be invalid
- ignore_checksum_mismatch = 0
- # ignore unknown sentence types
- ignore_unknown_sentencetypes = 0
- # do we want to even bother checking to see if it's from the 20th century?
- convert_dates_before_y2k = 1
-
- def lineReceived(self, line):
- if not line.startswith('$'):
- if self.ignore_invalid_sentence:
- return
- raise InvalidSentence("%r does not begin with $" % (line,))
- # message is everything between $ and *, checksum is xor of all ASCII values of the message
- strmessage, checksum = line[1:].strip().split('*')
- message = strmessage.split(',')
- sentencetype, message = message[0], message[1:]
- dispatch = self.dispatch.get(sentencetype, None)
- if (not dispatch) and (not self.ignore_unknown_sentencetypes):
- raise InvalidSentence("sentencetype %r" % (sentencetype,))
- if not self.ignore_checksum_mismatch:
- checksum, calculated_checksum = int(checksum, 16), reduce(operator.xor, map(ord, strmessage))
- if checksum != calculated_checksum:
- raise InvalidChecksum("Given 0x%02X != 0x%02X" % (checksum, calculated_checksum))
- handler = getattr(self, "handle_%s" % dispatch, None)
- decoder = getattr(self, "decode_%s" % dispatch, None)
- if not (dispatch and handler and decoder):
- # missing dispatch, handler, or decoder
- return
- # return handler(*decoder(*message))
- try:
- decoded = decoder(*message)
- except Exception, e:
- raise InvalidSentence("%r is not a valid %s (%s) sentence" % (line, sentencetype, dispatch))
- return handler(*decoded)
-
- def decode_position(self, latitude, ns, longitude, ew, utc, status):
- latitude, longitude = self._decode_latlon(latitude, ns, longitude, ew)
- utc = self._decode_utc(utc)
- if status == 'A':
- status = 1
- else:
- status = 0
- return (
- latitude,
- longitude,
- utc,
- status,
- )
-
- def decode_positiontime(self, utc, status, latitude, ns, longitude, ew, speed, course, utcdate, magvar, magdir):
- utc = self._decode_utc(utc)
- latitude, longitude = self._decode_latlon(latitude, ns, longitude, ew)
- if speed != '':
- speed = float(speed)
- else:
- speed = None
- if course != '':
- course = float(course)
- else:
- course = None
- utcdate = 2000+int(utcdate[4:6]), int(utcdate[2:4]), int(utcdate[0:2])
- if self.convert_dates_before_y2k and utcdate[0] > 2073:
- # GPS was invented by the US DoD in 1973, but NMEA uses 2 digit year.
- # Highly unlikely that we'll be using NMEA or this twisted module in 70 years,
- # but remotely possible that you'll be using it to play back data from the 20th century.
- utcdate = (utcdate[0] - 100, utcdate[1], utcdate[2])
- if magvar != '':
- magvar = float(magvar)
- if magdir == 'W':
- magvar = -magvar
- else:
- magvar = None
- return (
- latitude,
- longitude,
- speed,
- course,
- # UTC seconds past utcdate
- utc,
- # UTC (year, month, day)
- utcdate,
- # None or magnetic variation in degrees (west is negative)
- magvar,
- )
-
- def _decode_utc(self, utc):
- utc_hh, utc_mm, utc_ss = map(float, (utc[:2], utc[2:4], utc[4:]))
- return utc_hh * 3600.0 + utc_mm * 60.0 + utc_ss
-
- def _decode_latlon(self, latitude, ns, longitude, ew):
- latitude = float(latitude[:2]) + float(latitude[2:])/60.0
- if ns == 'S':
- latitude = -latitude
- longitude = float(longitude[:3]) + float(longitude[3:])/60.0
- if ew == 'W':
- longitude = -longitude
- return (latitude, longitude)
-
- def decode_activesatellites(self, mode1, mode2, *args):
- satellites, (pdop, hdop, vdop) = args[:12], map(float, args[12:])
- satlist = []
- for n in satellites:
- if n:
- satlist.append(int(n))
- else:
- satlist.append(None)
- mode = (mode1, int(mode2))
- return (
- # satellite list by channel
- tuple(satlist),
- # (MODE_AUTO/MODE_FORCED, MODE_NOFIX/MODE_2DFIX/MODE_3DFIX)
- mode,
- # position dilution of precision
- pdop,
- # horizontal dilution of precision
- hdop,
- # vertical dilution of precision
- vdop,
- )
-
- def decode_fix(self, utc, latitude, ns, longitude, ew, posfix, satellites, hdop, altitude, altitude_units, geoid_separation, geoid_separation_units, dgps_age, dgps_station_id):
- latitude, longitude = self._decode_latlon(latitude, ns, longitude, ew)
- utc = self._decode_utc(utc)
- posfix = int(posfix)
- satellites = int(satellites)
- hdop = float(hdop)
- altitude = (float(altitude), altitude_units)
- if geoid_separation != '':
- geoid = (float(geoid_separation), geoid_separation_units)
- else:
- geoid = None
- if dgps_age != '':
- dgps = (float(dgps_age), dgps_station_id)
- else:
- dgps = None
- return (
- # seconds since 00:00 UTC
- utc,
- # latitude (degrees)
- latitude,
- # longitude (degrees)
- longitude,
- # position fix status (POSFIX_INVALID, POSFIX_SPS, POSFIX_DGPS, POSFIX_PPS)
- posfix,
- # number of satellites used for fix 0 <= satellites <= 12
- satellites,
- # horizontal dilution of precision
- hdop,
- # None or (altitude according to WGS-84 ellipsoid, units (typically 'M' for meters))
- altitude,
- # None or (geoid separation according to WGS-84 ellipsoid, units (typically 'M' for meters))
- geoid,
- # (age of dgps data in seconds, dgps station id)
- dgps,
- )
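-
-
-# Editorial usage sketch, not part of the original module: a receiver only
-# needs to subclass NMEAReceiver and define handle_* methods whose signatures
-# match the tuples built by the decode_* methods above; handle_fix, for
-# example, receives the nine values returned by decode_fix. The commented
-# hookup below assumes twisted.internet.serialport plus pyserial and an
-# arbitrary example device path.
-class PrintingNMEAReceiver(NMEAReceiver):
-    def handle_fix(self, utc, latitude, longitude, posfix, satellites,
-                   hdop, altitude, geoid, dgps):
-        print 'fix: %.5f, %.5f (%d satellites)' % (latitude, longitude, satellites)
-
-# from twisted.internet import reactor
-# from twisted.internet.serialport import SerialPort
-# SerialPort(PrintingNMEAReceiver(), '/dev/ttyS0', reactor, baudrate=4800)
-# reactor.run()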
diff --git a/lib/python2.7/site-packages/Twisted-12.2.0-py2.7-linux-x86_64.egg/twisted/protocols/gps/rockwell.py b/lib/python2.7/site-packages/Twisted-12.2.0-py2.7-linux-x86_64.egg/twisted/protocols/gps/rockwell.py
deleted file mode 100755
index 7c1d2adc..00000000
--- a/lib/python2.7/site-packages/Twisted-12.2.0-py2.7-linux-x86_64.egg/twisted/protocols/gps/rockwell.py
+++ /dev/null
@@ -1,268 +0,0 @@
-# Copyright (c) Twisted Matrix Laboratories.
-# See LICENSE for details.
-
-
-"""Rockwell Semiconductor Zodiac Serial Protocol
-Coded from official protocol specs (Order No. GPS-25, 09/24/1996, Revision 11)
-
-Maintainer: Bob Ippolito
-
-The following Rockwell Zodiac messages are currently understood::
- EARTHA\\r\\n (a hack to "turn on" a DeLorme Earthmate)
- 1000 (Geodesic Position Status Output)
- 1002 (Channel Summary)
- 1003 (Visible Satellites)
- 1011 (Receiver ID)
-
-The following Rockwell Zodiac messages require implementation::
- None really, the others aren't quite so useful and require bidirectional communication w/ the device
-
-Other desired features::
- - Compatibility with the DeLorme Tripmate and other devices with this chipset (?)
-"""
-
-import struct, operator, math
-from twisted.internet import protocol
-from twisted.python import log
-
-DEBUG = 1
-
-class ZodiacParseError(ValueError):
- pass
-
-class Zodiac(protocol.Protocol):
- dispatch = {
- # Output Messages (* means they get sent by the receiver by default periodically)
- 1000: 'fix', # *Geodesic Position Status Output
- 1001: 'ecef', # ECEF Position Status Output
- 1002: 'channels', # *Channel Summary
- 1003: 'satellites', # *Visible Satellites
- 1005: 'dgps', # Differential GPS Status
- 1007: 'channelmeas', # Channel Measurement
- 1011: 'id', # *Receiver ID
- 1012: 'usersettings', # User-Settings Output
- 1100: 'testresults', # Built-In Test Results
- 1102: 'meastimemark', # Measurement Time Mark
- 1108: 'utctimemark', # UTC Time Mark Pulse Output
- 1130: 'serial', # Serial Port Communication Parameters In Use
- 1135: 'eepromupdate', # EEPROM Update
- 1136: 'eepromstatus', # EEPROM Status
- }
- # these aren't used for anything yet, just sitting here for reference
- messages = {
- # Input Messages
- 'fix': 1200, # Geodesic Position and Velocity Initialization
- 'udatum': 1210, # User-Defined Datum Definition
- 'mdatum': 1211, # Map Datum Select
- 'smask': 1212, # Satellite Elevation Mask Control
- 'sselect': 1213, # Satellite Candidate Select
- 'dgpsc': 1214, # Differential GPS Control
- 'startc': 1216, # Cold Start Control
- 'svalid': 1217, # Solution Validity Control
- 'antenna': 1218, # Antenna Type Select
- 'altinput': 1219, # User-Entered Altitude Input
- 'appctl': 1220, # Application Platform Control
- 'navcfg': 1221, # Nav Configuration
- 'test': 1300, # Perform Built-In Test Command
- 'restart': 1303, # Restart Command
- 'serial': 1330, # Serial Port Communications Parameters
- 'msgctl': 1331, # Message Protocol Control
- 'dgpsd': 1351, # Raw DGPS RTCM SC-104 Data
- }
- MAX_LENGTH = 296
- allow_earthmate_hack = 1
- recvd = ""
-
- def dataReceived(self, recd):
- self.recvd = self.recvd + recd
- while len(self.recvd) >= 10:
-
- # hack for DeLorme EarthMate
- if self.recvd[:8] == 'EARTHA\r\n':
- if self.allow_earthmate_hack:
- self.allow_earthmate_hack = 0
- self.transport.write('EARTHA\r\n')
- self.recvd = self.recvd[8:]
- continue
-
- if self.recvd[0:2] != '\xFF\x81':
- if DEBUG:
- raise ZodiacParseError('Invalid Sync %r' % self.recvd)
- else:
- raise ZodiacParseError
- sync, msg_id, length, acknak, checksum = struct.unpack('<HHHHh', self.recvd[:10])
-
- # verify checksum
- cksum = -(reduce(operator.add, (sync, msg_id, length, acknak)) & 0xFFFF)
- cksum, = struct.unpack('<h', struct.pack('<h', cksum))
- if cksum != checksum:
- if DEBUG:
- raise ZodiacParseError('Invalid Header Checksum %r != %r %r' % (checksum, cksum, self.recvd[:8]))
- else:
- raise ZodiacParseError
-
- # length was in words, now it's bytes
- length = length * 2
-
- # do we need more data ?
- neededBytes = 10
- if length:
- neededBytes += length + 2
- if len(self.recvd) < neededBytes:
- break
-
- if neededBytes > self.MAX_LENGTH:
- raise ZodiacParseError("Invalid Header??")
-
- # empty messages pass empty strings
- message = ''
-
- # does this message have data ?
- if length:
- message, checksum = self.recvd[10:10+length], struct.unpack('<h', self.recvd[10+length:neededBytes])[0]
- cksum = 0x10000 - (reduce(operator.add, struct.unpack('<%dH' % (length/2), message)) & 0xFFFF)
- cksum, = struct.unpack('<h', struct.pack('<h', cksum))
- if cksum != checksum:
- if DEBUG:
- log.msg('msg_id = %r length = %r' % (msg_id, length), debug=True)
- raise ZodiacParseError('Invalid Data Checksum %r != %r %r' % (checksum, cksum, message))
- else:
- raise ZodiacParseError
-
- # discard used buffer, dispatch message
- self.recvd = self.recvd[neededBytes:]
- self.receivedMessage(msg_id, message, acknak)
-
- def receivedMessage(self, msg_id, message, acknak):
- dispatch = self.dispatch.get(msg_id, None)
- if not dispatch:
- raise ZodiacParseError('Unknown msg_id = %r' % msg_id)
- handler = getattr(self, 'handle_%s' % dispatch, None)
- decoder = getattr(self, 'decode_%s' % dispatch, None)
- if not (handler and decoder):
- # missing handler or decoder
- #if DEBUG:
- # log.msg('MISSING HANDLER/DECODER PAIR FOR: %r' % (dispatch,), debug=True)
- return
- decoded = decoder(message)
- return handler(*decoded)
-
- def decode_fix(self, message):
- assert len(message) == 98, "Geodesic Position Status Output should be 55 words total (98 byte message)"
- (ticks, msgseq, satseq, navstatus, navtype, nmeasure, polar, gpswk, gpses, gpsns, utcdy, utcmo, utcyr, utchr, utcmn, utcsc, utcns, latitude, longitude, height, geoidalsep, speed, course, magvar, climb, mapdatum, exhposerr, exvposerr, extimeerr, exphvelerr, clkbias, clkbiasdev, clkdrift, clkdriftdev) = struct.unpack('<LhhHHHHHLLHHHHHHLlllhLHhhHLLLHllll', message)
-
- # there's a lot of shit in here..
- # I'll just snag the important stuff and spit it out like my NMEA decoder
- utc = (utchr * 3600.0) + (utcmn * 60.0) + utcsc + (float(utcns) * 0.000000001)
-
- log.msg('utchr, utcmn, utcsc, utcns = ' + repr((utchr, utcmn, utcsc, utcns)), debug=True)
-
- latitude = float(latitude) * 0.00000180 / math.pi
- longitude = float(longitude) * 0.00000180 / math.pi
- posfix = not (navstatus & 0x001c)
- satellites = nmeasure
- hdop = float(exhposerr) * 0.01
- altitude = float(height) * 0.01, 'M'
- geoid = float(geoidalsep) * 0.01, 'M'
- dgps = None
- return (
- # seconds since 00:00 UTC
- utc,
- # latitude (degrees)
- latitude,
- # longitude (degrees)
- longitude,
- # position fix status (invalid = False, valid = True)
- posfix,
- # number of satellites [measurements] used for fix 0 <= satellites <= 12
- satellites,
- # horizontal dilution of precision
- hdop,
- # (altitude according to WGS-84 ellipsoid, units (always 'M' for meters))
- altitude,
- # (geoid separation according to WGS-84 ellipsoid, units (always 'M' for meters))
- geoid,
- # None, for compatibility w/ NMEA code
- dgps,
- )
-
- def decode_id(self, message):
- assert len(message) == 106, "Receiver ID Message should be 59 words total (106 byte message)"
- ticks, msgseq, channels, software_version, software_date, options_list, reserved = struct.unpack('<Lh20s20s20s20s20s', message)
- channels, software_version, software_date, options_list = map(lambda s: s.split('\0')[0], (channels, software_version, software_date, options_list))
- software_version = float(software_version)
- channels = int(channels) # 0-12 .. but ALWAYS 12, so we ignore.
- options_list = int(options_list[:4], 16) # only two bitflags, others are reserved
- minimize_rom = (options_list & 0x01) > 0
- minimize_ram = (options_list & 0x02) > 0
- # (version info), (options info)
- return ((software_version, software_date), (minimize_rom, minimize_ram))
-
- def decode_channels(self, message):
- assert len(message) == 90, "Channel Summary Message should be 51 words total (90 byte message)"
- ticks, msgseq, satseq, gpswk, gpsws, gpsns = struct.unpack('<LhhHLL', message[:18])
- channels = []
- message = message[18:]
- for i in range(12):
- flags, prn, cno = struct.unpack('<HHH', message[6 * i:6 * (i + 1)])
- # measurement used, ephemeris available, measurement valid, dgps corrections available
- flags = (flags & 0x01, flags & 0x02, flags & 0x04, flags & 0x08)
- channels.append((flags, prn, cno))
- # ((flags, satellite PRN, C/No in dbHz)) for 12 channels
- # satellite message sequence number
- # gps week number, gps seconds in week (??), gps nanoseconds from Epoch
- return (tuple(channels),) #, satseq, (gpswk, gpsws, gpsns))
-
- def decode_satellites(self, message):
- assert len(message) == 90, "Visible Satellites Message should be 51 words total (90 byte message)"
- ticks, msgseq, gdop, pdop, hdop, vdop, tdop, numsatellites = struct.unpack('<LhhhhhhH', message[:18])
- gdop, pdop, hdop, vdop, tdop = map(lambda n: float(n) * 0.01, (gdop, pdop, hdop, vdop, tdop))
- satellites = []
- message = message[18:]
- for i in range(numsatellites):
- prn, azi, elev = struct.unpack('<Hhh', message[6 * i:6 * (i + 1)])
- azi, elev = map(lambda n: (float(n) * 0.0180 / math.pi), (azi, elev))
- satellites.append((prn, azi, elev))
- # ((PRN [0, 32], azimuth +-[0.0, 180.0] deg, elevation +-[0.0, 90.0] deg)) satellite info (0-12)
- # (geometric, position, horizontal, vertical, time) dilution of precision
- return (tuple(satellites), (gdop, pdop, hdop, vdop, tdop))
-
- def decode_dgps(self, message):
- assert len(message) == 38, "Differential GPS Status Message should be 25 words total (38 byte message)"
- raise NotImplementedError
-
- def decode_ecef(self, message):
- assert len(message) == 96, "ECEF Position Status Output Message should be 54 words total (96 byte message)"
- raise NotImplementedError
-
- def decode_channelmeas(self, message):
- assert len(message) == 296, "Channel Measurement Message should be 154 words total (296 byte message)"
- raise NotImplementedError
-
- def decode_usersettings(self, message):
- assert len(message) == 32, "User-Settings Output Message should be 22 words total (32 byte message)"
- raise NotImplementedError
-
- def decode_testresults(self, message):
- assert len(message) == 28, "Built-In Test Results Message should be 20 words total (28 byte message)"
- raise NotImplementedError
-
- def decode_meastimemark(self, message):
- assert len(message) == 494, "Measurement Time Mark Message should be 253 words total (494 byte message)"
- raise NotImplementedError
-
- def decode_utctimemark(self, message):
- assert len(message) == 28, "UTC Time Mark Pulse Output Message should be 20 words total (28 byte message)"
- raise NotImplementedError
-
- def decode_serial(self, message):
- assert len(message) == 30, "Serial Port Communication Parameters In Use Message should be 21 words total (30 byte message)"
- raise NotImplementedError
-
- def decode_eepromupdate(self, message):
- assert len(message) == 8, "EEPROM Update Message should be 10 words total (8 byte message)"
- raise NotImplementedError
-
- def decode_eepromstatus(self, message):
- assert len(message) == 24, "EEPROM Status Message should be 18 words total (24 byte message)"
- raise NotImplementedError
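-
-
-# Editorial usage sketch, not part of the original module: as with the NMEA
-# receiver, subclass Zodiac and define handle_* methods matching the decode_*
-# tuples above; decode_fix deliberately mirrors the NMEA fix tuple, so a
-# handler can be shared between the two. Driving it over
-# twisted.internet.serialport works the same way as for NMEA (pyserial
-# required; device path and baud rate would be assumptions, not spec values).
-class LoggingZodiac(Zodiac):
-    def handle_fix(self, utc, latitude, longitude, posfix, satellites,
-                   hdop, altitude, geoid, dgps):
-        log.msg('fix: %.5f, %.5f' % (latitude, longitude))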
diff --git a/lib/python2.7/site-packages/Twisted-12.2.0-py2.7-linux-x86_64.egg/twisted/protocols/htb.py b/lib/python2.7/site-packages/Twisted-12.2.0-py2.7-linux-x86_64.egg/twisted/protocols/htb.py
deleted file mode 100755
index 10008cf4..00000000
--- a/lib/python2.7/site-packages/Twisted-12.2.0-py2.7-linux-x86_64.egg/twisted/protocols/htb.py
+++ /dev/null
@@ -1,297 +0,0 @@
-# -*- test-case-name: twisted.test.test_htb -*-
-# Copyright (c) Twisted Matrix Laboratories.
-# See LICENSE for details.
-
-
-"""
-Hierarchical Token Bucket traffic shaping.
-
-Patterned after U{Martin Devera's Hierarchical Token Bucket traffic
-shaper for the Linux kernel<http://luxik.cdi.cz/~devik/qos/htb/>}.
-
-@seealso: U{HTB Linux queuing discipline manual - user guide
- <http://luxik.cdi.cz/~devik/qos/htb/manual/userg.htm>}
-@seealso: U{Token Bucket Filter in Linux Advanced Routing & Traffic Control
- HOWTO<http://lartc.org/howto/lartc.qdisc.classless.html#AEN682>}
-"""
-
-
-# TODO: Investigate whether we should be using os.times()[-1] instead of
-# time.time. time.time, it has been pointed out, can go backwards. Is
-# the same true of os.times?
-from time import time
-from zope.interface import implements, Interface
-
-from twisted.protocols import pcp
-
-
-class Bucket:
- """
- Implementation of a Token bucket.
-
- A bucket can hold a certain number of tokens and it drains over time.
-
- @cvar maxburst: The maximum number of tokens that the bucket can
- hold at any given time. If this is C{None}, the bucket has
- an infinite size.
- @type maxburst: C{int}
- @cvar rate: The rate at which the bucket drains, in number
- of tokens per second. If the rate is C{None}, the bucket
- drains instantaneously.
- @type rate: C{int}
- """
-
- maxburst = None
- rate = None
-
- _refcount = 0
-
- def __init__(self, parentBucket=None):
- """
- Create a L{Bucket} that may have a parent L{Bucket}.
-
- @param parentBucket: If a parent Bucket is specified,
- all L{add} and L{drip} operations on this L{Bucket}
- will be applied on the parent L{Bucket} as well.
- @type parentBucket: L{Bucket}
- """
- self.content = 0
- self.parentBucket = parentBucket
- self.lastDrip = time()
-
-
- def add(self, amount):
- """
- Adds tokens to the L{Bucket} and its C{parentBucket}.
-
- This will add as many of the C{amount} tokens as will fit into both
- this L{Bucket} and its C{parentBucket}.
-
- @param amount: The number of tokens to try to add.
- @type amount: C{int}
-
- @returns: The number of tokens that actually fit.
- @returntype: C{int}
- """
- self.drip()
- if self.maxburst is None:
- allowable = amount
- else:
- allowable = min(amount, self.maxburst - self.content)
-
- if self.parentBucket is not None:
- allowable = self.parentBucket.add(allowable)
- self.content += allowable
- return allowable
-
-
- def drip(self):
- """
- Let some of the bucket drain.
-
- The L{Bucket} drains at the rate specified by the class
- variable C{rate}.
-
- @returns: C{True} if the bucket is empty after this drip.
- @returntype: C{bool}
- """
- if self.parentBucket is not None:
- self.parentBucket.drip()
-
- if self.rate is None:
- self.content = 0
- else:
- now = time()
- deltaTime = now - self.lastDrip
- deltaTokens = deltaTime * self.rate
- self.content = max(0, self.content - deltaTokens)
- self.lastDrip = now
- return self.content == 0
-
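-# Editorial illustration, not part of the original module: a concrete bucket
-# is normally configured by subclassing and setting the two class variables.
-# This one holds at most 20000 tokens (e.g. bytes) and drains 10000 per
-# second, so add() admits at most 20 KB at once and about 10 KB/s after that.
-class _ExampleBucket(Bucket):
-    maxburst = 20000
-    rate = 10000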
-
-class IBucketFilter(Interface):
- def getBucketFor(*somethings, **some_kw):
- """
- Return a L{Bucket} corresponding to the provided parameters.
-
- @returntype: L{Bucket}
- """
-
-class HierarchicalBucketFilter:
- """
- Filter things into buckets that can be nested.
-
- @cvar bucketFactory: Class of buckets to make.
- @type bucketFactory: L{Bucket}
- @cvar sweepInterval: Seconds between sweeping out the bucket cache.
- @type sweepInterval: C{int}
- """
-
- implements(IBucketFilter)
-
- bucketFactory = Bucket
- sweepInterval = None
-
- def __init__(self, parentFilter=None):
- self.buckets = {}
- self.parentFilter = parentFilter
- self.lastSweep = time()
-
- def getBucketFor(self, *a, **kw):
- """
- Find or create a L{Bucket} corresponding to the provided parameters.
-
- Any parameters are passed on to L{getBucketKey}, which uses them to
- decide which bucket you get.
-
- @returntype: L{Bucket}
- """
- if ((self.sweepInterval is not None)
- and ((time() - self.lastSweep) > self.sweepInterval)):
- self.sweep()
-
- if self.parentFilter:
- parentBucket = self.parentFilter.getBucketFor(self, *a, **kw)
- else:
- parentBucket = None
-
- key = self.getBucketKey(*a, **kw)
- bucket = self.buckets.get(key)
- if bucket is None:
- bucket = self.bucketFactory(parentBucket)
- self.buckets[key] = bucket
- return bucket
-
- def getBucketKey(self, *a, **kw):
- """
- Construct a key based on the input parameters to choose a L{Bucket}.
-
- The default implementation returns the same key for all
- arguments. Override this method to provide L{Bucket} selection.
-
- @returns: Something to be used as a key in the bucket cache.
- """
- return None
-
- def sweep(self):
- """
- Remove empty buckets.
- """
- for key, bucket in self.buckets.items():
- bucket_is_empty = bucket.drip()
- if (bucket._refcount == 0) and bucket_is_empty:
- del self.buckets[key]
-
- self.lastSweep = time()
-
-
-class FilterByHost(HierarchicalBucketFilter):
- """
- A Hierarchical Bucket filter with a L{Bucket} for each host.
- """
- sweepInterval = 60 * 20
-
- def getBucketKey(self, transport):
- return transport.getPeer()[1]
-
-
-class FilterByServer(HierarchicalBucketFilter):
- """
- A Hierarchical Bucket filter with a L{Bucket} for each service.
- """
- sweepInterval = None
-
- def getBucketKey(self, transport):
- return transport.getHost()[2]
-
-
-class ShapedConsumer(pcp.ProducerConsumerProxy):
- """
- Wraps a C{Consumer} and shapes the rate at which it receives data.
- """
- # Providing a Pull interface means I don't have to try to schedule
- # traffic with callLaters.
- iAmStreaming = False
-
- def __init__(self, consumer, bucket):
- pcp.ProducerConsumerProxy.__init__(self, consumer)
- self.bucket = bucket
- self.bucket._refcount += 1
-
- def _writeSomeData(self, data):
- # In practice, this actually results in obscene amounts of
- # overhead, as a result of generating lots and lots of packets
- # with twelve-byte payloads. We may need to do a version of
- # this with scheduled writes after all.
- amount = self.bucket.add(len(data))
- return pcp.ProducerConsumerProxy._writeSomeData(self, data[:amount])
-
- def stopProducing(self):
- pcp.ProducerConsumerProxy.stopProducing(self)
- self.bucket._refcount -= 1
-
-
-class ShapedTransport(ShapedConsumer):
- """
- Wraps a C{Transport} and shapes the rate at which it receives data.
-
- This is a L{ShapedConsumer} with a little bit of magic to provide for
- the case where the consumer it wraps is also a C{Transport} and people
- will be attempting to access attributes this does not proxy as a
- C{Consumer} (e.g. C{loseConnection}).
- """
- # Ugh. We only wanted to filter IConsumer, not ITransport.
-
- iAmStreaming = False
- def __getattr__(self, name):
- # Because people will be doing things like .getPeer and
- # .loseConnection on me.
- return getattr(self.consumer, name)
-
-
-class ShapedProtocolFactory:
- """
- Dispense C{Protocols} with traffic shaping on their transports.
-
- Usage::
-
- myserver = SomeFactory()
- myserver.protocol = ShapedProtocolFactory(myserver.protocol,
- bucketFilter)
-
-    Where C{SomeFactory} is a L{twisted.internet.protocol.Factory}, and
- C{bucketFilter} is an instance of L{HierarchicalBucketFilter}.
- """
- def __init__(self, protoClass, bucketFilter):
- """
- Tell me what to wrap and where to get buckets.
-
- @param protoClass: The class of C{Protocol} this will generate
- wrapped instances of.
- @type protoClass: L{Protocol<twisted.internet.interfaces.IProtocol>}
- class
- @param bucketFilter: The filter which will determine how
- traffic is shaped.
- @type bucketFilter: L{HierarchicalBucketFilter}.
- """
- # More precisely, protoClass can be any callable that will return
- # instances of something that implements IProtocol.
- self.protocol = protoClass
- self.bucketFilter = bucketFilter
-
- def __call__(self, *a, **kw):
- """
- Make a C{Protocol} instance with a shaped transport.
-
- Any parameters will be passed on to the protocol's initializer.
-
- @returns: A C{Protocol} instance with a L{ShapedTransport}.
- """
- proto = self.protocol(*a, **kw)
- origMakeConnection = proto.makeConnection
- def makeConnection(transport):
- bucket = self.bucketFilter.getBucketFor(transport)
- shapedTransport = ShapedTransport(transport, bucket)
- return origMakeConnection(shapedTransport)
- proto.makeConnection = makeConnection
- return proto
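
For reference, a minimal sketch of how the traffic-shaping pieces above were typically wired together, following the ShapedProtocolFactory usage shown in its docstring; the SlowBucket parameters, the EchoProtocol class and port 8007 are illustrative only, not part of the module:

    from twisted.internet import reactor, protocol
    from twisted.protocols import htb

    class SlowBucket(htb.Bucket):
        # Illustrative shaping parameters: queue at most 20 kB,
        # drain at 10 kB per second.
        maxburst = 20000
        rate = 10000

    class EchoProtocol(protocol.Protocol):
        def dataReceived(self, data):
            self.transport.write(data)

    bucketFilter = htb.HierarchicalBucketFilter()
    bucketFilter.bucketFactory = SlowBucket

    factory = protocol.ServerFactory()
    factory.protocol = htb.ShapedProtocolFactory(EchoProtocol, bucketFilter)

    reactor.listenTCP(8007, factory)
    reactor.run()

Each accepted connection then goes through a ShapedTransport whose Bucket limits how fast data reaches the real transport.
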
diff --git a/lib/python2.7/site-packages/Twisted-12.2.0-py2.7-linux-x86_64.egg/twisted/protocols/ident.py b/lib/python2.7/site-packages/Twisted-12.2.0-py2.7-linux-x86_64.egg/twisted/protocols/ident.py
deleted file mode 100755
index 985322df..00000000
--- a/lib/python2.7/site-packages/Twisted-12.2.0-py2.7-linux-x86_64.egg/twisted/protocols/ident.py
+++ /dev/null
@@ -1,231 +0,0 @@
-# -*- test-case-name: twisted.test.test_ident -*-
-# Copyright (c) Twisted Matrix Laboratories.
-# See LICENSE for details.
-
-"""
-Ident protocol implementation.
-"""
-
-import struct
-
-from twisted.internet import defer
-from twisted.protocols import basic
-from twisted.python import log, failure
-
-_MIN_PORT = 1
-_MAX_PORT = 2 ** 16 - 1
-
-class IdentError(Exception):
- """
- Can't determine connection owner; reason unknown.
- """
-
- identDescription = 'UNKNOWN-ERROR'
-
- def __str__(self):
- return self.identDescription
-
-
-class NoUser(IdentError):
- """
- The connection specified by the port pair is not currently in use or
- currently not owned by an identifiable entity.
- """
- identDescription = 'NO-USER'
-
-
-class InvalidPort(IdentError):
- """
- Either the local or foreign port was improperly specified. This should
- be returned if either or both of the port ids were out of range (TCP
- port numbers are from 1-65535), negative integers, reals or in any
- fashion not recognized as a non-negative integer.
- """
- identDescription = 'INVALID-PORT'
-
-
-class HiddenUser(IdentError):
- """
- The server was able to identify the user of this port, but the
- information was not returned at the request of the user.
- """
- identDescription = 'HIDDEN-USER'
-
-
-class IdentServer(basic.LineOnlyReceiver):
- """
- The Identification Protocol (a.k.a., "ident", a.k.a., "the Ident
- Protocol") provides a means to determine the identity of a user of a
- particular TCP connection. Given a TCP port number pair, it returns a
- character string which identifies the owner of that connection on the
- server's system.
-
- Server authors should subclass this class and override the lookup method.
- The default implementation returns an UNKNOWN-ERROR response for every
- query.
- """
-
- def lineReceived(self, line):
- parts = line.split(',')
- if len(parts) != 2:
- self.invalidQuery()
- else:
- try:
- portOnServer, portOnClient = map(int, parts)
- except ValueError:
- self.invalidQuery()
- else:
- if _MIN_PORT <= portOnServer <= _MAX_PORT and _MIN_PORT <= portOnClient <= _MAX_PORT:
- self.validQuery(portOnServer, portOnClient)
- else:
- self._ebLookup(failure.Failure(InvalidPort()), portOnServer, portOnClient)
-
- def invalidQuery(self):
- self.transport.loseConnection()
-
-
- def validQuery(self, portOnServer, portOnClient):
- """
- Called when a valid query is received to look up and deliver the
- response.
-
- @param portOnServer: The server port from the query.
- @param portOnClient: The client port from the query.
- """
- serverAddr = self.transport.getHost().host, portOnServer
- clientAddr = self.transport.getPeer().host, portOnClient
- defer.maybeDeferred(self.lookup, serverAddr, clientAddr
- ).addCallback(self._cbLookup, portOnServer, portOnClient
- ).addErrback(self._ebLookup, portOnServer, portOnClient
- )
-
-
- def _cbLookup(self, (sysName, userId), sport, cport):
- self.sendLine('%d, %d : USERID : %s : %s' % (sport, cport, sysName, userId))
-
- def _ebLookup(self, failure, sport, cport):
- if failure.check(IdentError):
- self.sendLine('%d, %d : ERROR : %s' % (sport, cport, failure.value))
- else:
- log.err(failure)
- self.sendLine('%d, %d : ERROR : %s' % (sport, cport, IdentError(failure.value)))
-
- def lookup(self, serverAddress, clientAddress):
- """Lookup user information about the specified address pair.
-
- Return value should be a two-tuple of system name and username.
- Acceptable values for the system name may be found online at::
-
- U{http://www.iana.org/assignments/operating-system-names}
-
- This method may also raise any IdentError subclass (or IdentError
- itself) to indicate user information will not be provided for the
- given query.
-
- A Deferred may also be returned.
-
- @param serverAddress: A two-tuple representing the server endpoint
- of the address being queried. The first element is a string holding
- a dotted-quad IP address. The second element is an integer
- representing the port.
-
- @param clientAddress: Like L{serverAddress}, but represents the
- client endpoint of the address being queried.
- """
- raise IdentError()
-
-class ProcServerMixin:
- """Implements lookup() to grab entries for responses from /proc/net/tcp
- """
-
- SYSTEM_NAME = 'LINUX'
-
- try:
- from pwd import getpwuid
- def getUsername(self, uid, getpwuid=getpwuid):
- return getpwuid(uid)[0]
- del getpwuid
- except ImportError:
- def getUsername(self, uid):
- raise IdentError()
-
- def entries(self):
- f = file('/proc/net/tcp')
- f.readline()
- for L in f:
- yield L.strip()
-
- def dottedQuadFromHexString(self, hexstr):
- return '.'.join(map(str, struct.unpack('4B', struct.pack('=L', int(hexstr, 16)))))
-
- def unpackAddress(self, packed):
- addr, port = packed.split(':')
- addr = self.dottedQuadFromHexString(addr)
- port = int(port, 16)
- return addr, port
-
- def parseLine(self, line):
- parts = line.strip().split()
- localAddr, localPort = self.unpackAddress(parts[1])
- remoteAddr, remotePort = self.unpackAddress(parts[2])
- uid = int(parts[7])
- return (localAddr, localPort), (remoteAddr, remotePort), uid
-
- def lookup(self, serverAddress, clientAddress):
- for ent in self.entries():
- localAddr, remoteAddr, uid = self.parseLine(ent)
- if remoteAddr == clientAddress and localAddr[1] == serverAddress[1]:
- return (self.SYSTEM_NAME, self.getUsername(uid))
-
- raise NoUser()
-
-
-class IdentClient(basic.LineOnlyReceiver):
-
- errorTypes = (IdentError, NoUser, InvalidPort, HiddenUser)
-
- def __init__(self):
- self.queries = []
-
- def lookup(self, portOnServer, portOnClient):
- """Lookup user information about the specified address pair.
- """
- self.queries.append((defer.Deferred(), portOnServer, portOnClient))
- if len(self.queries) > 1:
- return self.queries[-1][0]
-
- self.sendLine('%d, %d' % (portOnServer, portOnClient))
- return self.queries[-1][0]
-
- def lineReceived(self, line):
- if not self.queries:
- log.msg("Unexpected server response: %r" % (line,))
- else:
- d, _, _ = self.queries.pop(0)
- self.parseResponse(d, line)
- if self.queries:
- self.sendLine('%d, %d' % (self.queries[0][1], self.queries[0][2]))
-
- def connectionLost(self, reason):
- for q in self.queries:
- q[0].errback(IdentError(reason))
- self.queries = []
-
- def parseResponse(self, deferred, line):
- parts = line.split(':', 2)
- if len(parts) != 3:
- deferred.errback(IdentError(line))
- else:
- ports, type, addInfo = map(str.strip, parts)
- if type == 'ERROR':
- for et in self.errorTypes:
- if et.identDescription == addInfo:
- deferred.errback(et(line))
- return
- deferred.errback(IdentError(line))
- else:
- deferred.callback((type, addInfo))
-
-__all__ = ['IdentError', 'NoUser', 'InvalidPort', 'HiddenUser',
- 'IdentServer', 'IdentClient',
- 'ProcServerMixin']
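
As the IdentServer docstring above suggests, server authors subclass it and override lookup(); a minimal sketch follows (the StaticIdentServer name and the fixed ('UNIX', 'nobody') reply are illustrative; 113 is the standard ident port):

    from twisted.internet import reactor, protocol
    from twisted.protocols import ident

    class StaticIdentServer(ident.IdentServer):
        def lookup(self, serverAddress, clientAddress):
            # Answer every valid query with a fixed identity; raising
            # ident.NoUser() or ident.HiddenUser() would refuse instead.
            return ('UNIX', 'nobody')

    factory = protocol.ServerFactory()
    factory.protocol = StaticIdentServer
    reactor.listenTCP(113, factory)
    reactor.run()
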
diff --git a/lib/python2.7/site-packages/Twisted-12.2.0-py2.7-linux-x86_64.egg/twisted/protocols/loopback.py b/lib/python2.7/site-packages/Twisted-12.2.0-py2.7-linux-x86_64.egg/twisted/protocols/loopback.py
deleted file mode 100755
index e5848279..00000000
--- a/lib/python2.7/site-packages/Twisted-12.2.0-py2.7-linux-x86_64.egg/twisted/protocols/loopback.py
+++ /dev/null
@@ -1,372 +0,0 @@
-# -*- test-case-name: twisted.test.test_loopback -*-
-# Copyright (c) Twisted Matrix Laboratories.
-# See LICENSE for details.
-
-"""
-Testing support for protocols -- loopback between client and server.
-"""
-
-# system imports
-import tempfile
-from zope.interface import implements
-
-# Twisted Imports
-from twisted.protocols import policies
-from twisted.internet import interfaces, protocol, main, defer
-from twisted.internet.task import deferLater
-from twisted.python import failure
-from twisted.internet.interfaces import IAddress
-
-
-class _LoopbackQueue(object):
- """
-    Trivial wrapper around a list to give it a queue-like interface, with the
-    addition of sending a notification by way of a Deferred whenever something
-    is added to the list.
- """
-
- _notificationDeferred = None
- disconnect = False
-
- def __init__(self):
- self._queue = []
-
-
- def put(self, v):
- self._queue.append(v)
- if self._notificationDeferred is not None:
- d, self._notificationDeferred = self._notificationDeferred, None
- d.callback(None)
-
-
- def __nonzero__(self):
- return bool(self._queue)
-
-
- def get(self):
- return self._queue.pop(0)
-
-
-
-class _LoopbackAddress(object):
- implements(IAddress)
-
-
-class _LoopbackTransport(object):
- implements(interfaces.ITransport, interfaces.IConsumer)
-
- disconnecting = False
- producer = None
-
- # ITransport
- def __init__(self, q):
- self.q = q
-
- def write(self, bytes):
- self.q.put(bytes)
-
- def writeSequence(self, iovec):
- self.q.put(''.join(iovec))
-
- def loseConnection(self):
- self.q.disconnect = True
- self.q.put(None)
-
- def getPeer(self):
- return _LoopbackAddress()
-
- def getHost(self):
- return _LoopbackAddress()
-
- # IConsumer
- def registerProducer(self, producer, streaming):
- assert self.producer is None
- self.producer = producer
- self.streamingProducer = streaming
- self._pollProducer()
-
- def unregisterProducer(self):
- assert self.producer is not None
- self.producer = None
-
- def _pollProducer(self):
- if self.producer is not None and not self.streamingProducer:
- self.producer.resumeProducing()
-
-
-
-def identityPumpPolicy(queue, target):
- """
- L{identityPumpPolicy} is a policy which delivers each chunk of data written
- to the given queue as-is to the target.
-
- This isn't a particularly realistic policy.
-
- @see: L{loopbackAsync}
- """
- while queue:
- bytes = queue.get()
- if bytes is None:
- break
- target.dataReceived(bytes)
-
-
-
-def collapsingPumpPolicy(queue, target):
- """
- L{collapsingPumpPolicy} is a policy which collapses all outstanding chunks
- into a single string and delivers it to the target.
-
- @see: L{loopbackAsync}
- """
- bytes = []
- while queue:
- chunk = queue.get()
- if chunk is None:
- break
- bytes.append(chunk)
- if bytes:
- target.dataReceived(''.join(bytes))
-
-
-
-def loopbackAsync(server, client, pumpPolicy=identityPumpPolicy):
- """
- Establish a connection between C{server} and C{client} then transfer data
- between them until the connection is closed. This is often useful for
- testing a protocol.
-
- @param server: The protocol instance representing the server-side of this
- connection.
-
- @param client: The protocol instance representing the client-side of this
- connection.
-
- @param pumpPolicy: When either C{server} or C{client} writes to its
- transport, the string passed in is added to a queue of data for the
- other protocol. Eventually, C{pumpPolicy} will be called with one such
- queue and the corresponding protocol object. The pump policy callable
- is responsible for emptying the queue and passing the strings it
- contains to the given protocol's C{dataReceived} method. The signature
- of C{pumpPolicy} is C{(queue, protocol)}. C{queue} is an object with a
- C{get} method which will return the next string written to the
- transport, or C{None} if the transport has been disconnected, and which
- evaluates to C{True} if and only if there are more items to be
- retrieved via C{get}.
-
- @return: A L{Deferred} which fires when the connection has been closed and
- both sides have received notification of this.
- """
- serverToClient = _LoopbackQueue()
- clientToServer = _LoopbackQueue()
-
- server.makeConnection(_LoopbackTransport(serverToClient))
- client.makeConnection(_LoopbackTransport(clientToServer))
-
- return _loopbackAsyncBody(
- server, serverToClient, client, clientToServer, pumpPolicy)
-
-
-
-def _loopbackAsyncBody(server, serverToClient, client, clientToServer,
- pumpPolicy):
- """
- Transfer bytes from the output queue of each protocol to the input of the other.
-
- @param server: The protocol instance representing the server-side of this
- connection.
-
- @param serverToClient: The L{_LoopbackQueue} holding the server's output.
-
- @param client: The protocol instance representing the client-side of this
- connection.
-
- @param clientToServer: The L{_LoopbackQueue} holding the client's output.
-
- @param pumpPolicy: See L{loopbackAsync}.
-
- @return: A L{Deferred} which fires when the connection has been closed and
- both sides have received notification of this.
- """
- def pump(source, q, target):
- sent = False
- if q:
- pumpPolicy(q, target)
- sent = True
- if sent and not q:
- # A write buffer has now been emptied. Give any producer on that
- # side an opportunity to produce more data.
- source.transport._pollProducer()
-
- return sent
-
- while 1:
- disconnect = clientSent = serverSent = False
-
- # Deliver the data which has been written.
- serverSent = pump(server, serverToClient, client)
- clientSent = pump(client, clientToServer, server)
-
- if not clientSent and not serverSent:
- # Neither side wrote any data. Wait for some new data to be added
- # before trying to do anything further.
- d = defer.Deferred()
- clientToServer._notificationDeferred = d
- serverToClient._notificationDeferred = d
- d.addCallback(
- _loopbackAsyncContinue,
- server, serverToClient, client, clientToServer, pumpPolicy)
- return d
- if serverToClient.disconnect:
- # The server wants to drop the connection. Flush any remaining
- # data it has.
- disconnect = True
- pump(server, serverToClient, client)
- elif clientToServer.disconnect:
- # The client wants to drop the connection. Flush any remaining
- # data it has.
- disconnect = True
- pump(client, clientToServer, server)
- if disconnect:
- # Someone wanted to disconnect, so okay, the connection is gone.
- server.connectionLost(failure.Failure(main.CONNECTION_DONE))
- client.connectionLost(failure.Failure(main.CONNECTION_DONE))
- return defer.succeed(None)
-
-
-
-def _loopbackAsyncContinue(ignored, server, serverToClient, client,
- clientToServer, pumpPolicy):
- # Clear the Deferred from each message queue, since it has already fired
- # and cannot be used again.
- clientToServer._notificationDeferred = None
- serverToClient._notificationDeferred = None
-
- # Schedule some more byte-pushing to happen. This isn't done
- # synchronously because no actual transport can re-enter dataReceived as
- # a result of calling write, and doing this synchronously could result
- # in that.
- from twisted.internet import reactor
- return deferLater(
- reactor, 0,
- _loopbackAsyncBody,
- server, serverToClient, client, clientToServer, pumpPolicy)
-
-
-
-class LoopbackRelay:
-
- implements(interfaces.ITransport, interfaces.IConsumer)
-
- buffer = ''
- shouldLose = 0
- disconnecting = 0
- producer = None
-
- def __init__(self, target, logFile=None):
- self.target = target
- self.logFile = logFile
-
- def write(self, data):
- self.buffer = self.buffer + data
- if self.logFile:
- self.logFile.write("loopback writing %s\n" % repr(data))
-
- def writeSequence(self, iovec):
- self.write("".join(iovec))
-
- def clearBuffer(self):
- if self.shouldLose == -1:
- return
-
- if self.producer:
- self.producer.resumeProducing()
- if self.buffer:
- if self.logFile:
- self.logFile.write("loopback receiving %s\n" % repr(self.buffer))
- buffer = self.buffer
- self.buffer = ''
- self.target.dataReceived(buffer)
- if self.shouldLose == 1:
- self.shouldLose = -1
- self.target.connectionLost(failure.Failure(main.CONNECTION_DONE))
-
- def loseConnection(self):
- if self.shouldLose != -1:
- self.shouldLose = 1
-
- def getHost(self):
- return 'loopback'
-
- def getPeer(self):
- return 'loopback'
-
- def registerProducer(self, producer, streaming):
- self.producer = producer
-
- def unregisterProducer(self):
- self.producer = None
-
- def logPrefix(self):
- return 'Loopback(%r)' % (self.target.__class__.__name__,)
-
-
-
-class LoopbackClientFactory(protocol.ClientFactory):
-
- def __init__(self, protocol):
- self.disconnected = 0
- self.deferred = defer.Deferred()
- self.protocol = protocol
-
- def buildProtocol(self, addr):
- return self.protocol
-
- def clientConnectionLost(self, connector, reason):
- self.disconnected = 1
- self.deferred.callback(None)
-
-
-class _FireOnClose(policies.ProtocolWrapper):
- def __init__(self, protocol, factory):
- policies.ProtocolWrapper.__init__(self, protocol, factory)
- self.deferred = defer.Deferred()
-
- def connectionLost(self, reason):
- policies.ProtocolWrapper.connectionLost(self, reason)
- self.deferred.callback(None)
-
-
-def loopbackTCP(server, client, port=0, noisy=True):
- """Run session between server and client protocol instances over TCP."""
- from twisted.internet import reactor
- f = policies.WrappingFactory(protocol.Factory())
- serverWrapper = _FireOnClose(f, server)
- f.noisy = noisy
- f.buildProtocol = lambda addr: serverWrapper
- serverPort = reactor.listenTCP(port, f, interface='127.0.0.1')
- clientF = LoopbackClientFactory(client)
- clientF.noisy = noisy
- reactor.connectTCP('127.0.0.1', serverPort.getHost().port, clientF)
- d = clientF.deferred
- d.addCallback(lambda x: serverWrapper.deferred)
- d.addCallback(lambda x: serverPort.stopListening())
- return d
-
-
-def loopbackUNIX(server, client, noisy=True):
- """Run session between server and client protocol instances over UNIX socket."""
- path = tempfile.mktemp()
- from twisted.internet import reactor
- f = policies.WrappingFactory(protocol.Factory())
- serverWrapper = _FireOnClose(f, server)
- f.noisy = noisy
- f.buildProtocol = lambda addr: serverWrapper
- serverPort = reactor.listenUNIX(path, f)
- clientF = LoopbackClientFactory(client)
- clientF.noisy = noisy
- reactor.connectUNIX(path, clientF)
- d = clientF.deferred
- d.addCallback(lambda x: serverWrapper.deferred)
- d.addCallback(lambda x: serverPort.stopListening())
- return d
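
loopbackAsync, documented above, is the usual entry point for exercising a protocol pair in tests; below is a minimal sketch with two throwaway protocols (Greeter and Collector are illustrative names, not part of the module):

    from twisted.internet import protocol
    from twisted.protocols import loopback

    class Greeter(protocol.Protocol):
        # Server side: send a banner, then hang up.
        def connectionMade(self):
            self.transport.write("hello\r\n")
            self.transport.loseConnection()

    class Collector(protocol.Protocol):
        # Client side: remember everything the peer sent.
        def __init__(self):
            self.received = []
        def dataReceived(self, data):
            self.received.append(data)

    client = Collector()
    d = loopback.loopbackAsync(Greeter(), client)
    # d fires once both sides have seen connectionLost; in a trial test
    # one would normally just return it.
    d.addCallback(lambda ignored: ''.join(client.received))
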
diff --git a/lib/python2.7/site-packages/Twisted-12.2.0-py2.7-linux-x86_64.egg/twisted/protocols/memcache.py b/lib/python2.7/site-packages/Twisted-12.2.0-py2.7-linux-x86_64.egg/twisted/protocols/memcache.py
deleted file mode 100755
index a5e987d2..00000000
--- a/lib/python2.7/site-packages/Twisted-12.2.0-py2.7-linux-x86_64.egg/twisted/protocols/memcache.py
+++ /dev/null
@@ -1,758 +0,0 @@
-# -*- test-case-name: twisted.test.test_memcache -*-
-# Copyright (c) Twisted Matrix Laboratories.
-# See LICENSE for details.
-
-"""
-Memcache client protocol. Memcached is a caching server, storing data in the
-form of key/value pairs, and memcache is the protocol used to talk to it.
-
-To connect to a server, create a factory for L{MemCacheProtocol}::
-
- from twisted.internet import reactor, protocol
- from twisted.protocols.memcache import MemCacheProtocol, DEFAULT_PORT
- d = protocol.ClientCreator(reactor, MemCacheProtocol
- ).connectTCP("localhost", DEFAULT_PORT)
- def doSomething(proto):
- # Here you call the memcache operations
- return proto.set("mykey", "a lot of data")
- d.addCallback(doSomething)
- reactor.run()
-
-All the operations of the memcache protocol are present, but
-L{MemCacheProtocol.set} and L{MemCacheProtocol.get} are the most important.
-
-See U{http://code.sixapart.com/svn/memcached/trunk/server/doc/protocol.txt} for
-more information about the protocol.
-"""
-
-try:
- from collections import deque
-except ImportError:
- class deque(list):
- def popleft(self):
- return self.pop(0)
-
-
-from twisted.protocols.basic import LineReceiver
-from twisted.protocols.policies import TimeoutMixin
-from twisted.internet.defer import Deferred, fail, TimeoutError
-from twisted.python import log
-
-
-
-DEFAULT_PORT = 11211
-
-
-
-class NoSuchCommand(Exception):
- """
-    Exception raised when a non-existent command is called.
- """
-
-
-
-class ClientError(Exception):
- """
- Error caused by an invalid client call.
- """
-
-
-
-class ServerError(Exception):
- """
- Problem happening on the server.
- """
-
-
-
-class Command(object):
- """
-    Wrap a client action into an object that holds the values used in the
- protocol.
-
- @ivar _deferred: the L{Deferred} object that will be fired when the result
- arrives.
- @type _deferred: L{Deferred}
-
- @ivar command: name of the command sent to the server.
- @type command: C{str}
- """
-
- def __init__(self, command, **kwargs):
- """
- Create a command.
-
- @param command: the name of the command.
- @type command: C{str}
-
-        @param kwargs: these values will be stored as attributes of the object
-            for future use.
- """
- self.command = command
- self._deferred = Deferred()
- for k, v in kwargs.items():
- setattr(self, k, v)
-
-
- def success(self, value):
- """
- Shortcut method to fire the underlying deferred.
- """
- self._deferred.callback(value)
-
-
- def fail(self, error):
- """
-        Make the underlying deferred fail.
- """
- self._deferred.errback(error)
-
-
-
-class MemCacheProtocol(LineReceiver, TimeoutMixin):
- """
- MemCache protocol: connect to a memcached server to store/retrieve values.
-
- @ivar persistentTimeOut: the timeout period used to wait for a response.
- @type persistentTimeOut: C{int}
-
- @ivar _current: current list of requests waiting for an answer from the
- server.
- @type _current: C{deque} of L{Command}
-
- @ivar _lenExpected: amount of data expected in raw mode, when reading for
- a value.
- @type _lenExpected: C{int}
-
- @ivar _getBuffer: current buffer of data, used to store temporary data
- when reading in raw mode.
- @type _getBuffer: C{list}
-
- @ivar _bufferLength: the total amount of bytes in C{_getBuffer}.
- @type _bufferLength: C{int}
-
- @ivar _disconnected: indicate if the connectionLost has been called or not.
- @type _disconnected: C{bool}
- """
- MAX_KEY_LENGTH = 250
- _disconnected = False
-
- def __init__(self, timeOut=60):
- """
- Create the protocol.
-
- @param timeOut: the timeout to wait before detecting that the
-            connection is dead and closing it. It's expressed in seconds.
- @type timeOut: C{int}
- """
- self._current = deque()
- self._lenExpected = None
- self._getBuffer = None
- self._bufferLength = None
- self.persistentTimeOut = self.timeOut = timeOut
-
-
- def _cancelCommands(self, reason):
- """
- Cancel all the outstanding commands, making them fail with C{reason}.
- """
- while self._current:
- cmd = self._current.popleft()
- cmd.fail(reason)
-
-
- def timeoutConnection(self):
- """
- Close the connection in case of timeout.
- """
- self._cancelCommands(TimeoutError("Connection timeout"))
- self.transport.loseConnection()
-
-
- def connectionLost(self, reason):
- """
- Cause any outstanding commands to fail.
- """
- self._disconnected = True
- self._cancelCommands(reason)
- LineReceiver.connectionLost(self, reason)
-
-
- def sendLine(self, line):
- """
- Override sendLine to add a timeout to response.
- """
- if not self._current:
- self.setTimeout(self.persistentTimeOut)
- LineReceiver.sendLine(self, line)
-
-
- def rawDataReceived(self, data):
- """
- Collect data for a get.
- """
- self.resetTimeout()
- self._getBuffer.append(data)
- self._bufferLength += len(data)
- if self._bufferLength >= self._lenExpected + 2:
- data = "".join(self._getBuffer)
- buf = data[:self._lenExpected]
- rem = data[self._lenExpected + 2:]
- val = buf
- self._lenExpected = None
- self._getBuffer = None
- self._bufferLength = None
- cmd = self._current[0]
- if cmd.multiple:
- flags, cas = cmd.values[cmd.currentKey]
- cmd.values[cmd.currentKey] = (flags, cas, val)
- else:
- cmd.value = val
- self.setLineMode(rem)
-
-
- def cmd_STORED(self):
- """
- Manage a success response to a set operation.
- """
- self._current.popleft().success(True)
-
-
- def cmd_NOT_STORED(self):
- """
- Manage a specific 'not stored' response to a set operation: this is not
- an error, but some condition wasn't met.
- """
- self._current.popleft().success(False)
-
-
- def cmd_END(self):
- """
-        This is the end token of a get or a stats operation.
- """
- cmd = self._current.popleft()
- if cmd.command == "get":
- if cmd.multiple:
- values = dict([(key, val[::2]) for key, val in
- cmd.values.iteritems()])
- cmd.success(values)
- else:
- cmd.success((cmd.flags, cmd.value))
- elif cmd.command == "gets":
- if cmd.multiple:
- cmd.success(cmd.values)
- else:
- cmd.success((cmd.flags, cmd.cas, cmd.value))
- elif cmd.command == "stats":
- cmd.success(cmd.values)
-
-
- def cmd_NOT_FOUND(self):
- """
- Manage error response for incr/decr/delete.
- """
- self._current.popleft().success(False)
-
-
- def cmd_VALUE(self, line):
- """
-        Prepare to read a value after a get.
- """
- cmd = self._current[0]
- if cmd.command == "get":
- key, flags, length = line.split()
- cas = ""
- else:
- key, flags, length, cas = line.split()
- self._lenExpected = int(length)
- self._getBuffer = []
- self._bufferLength = 0
- if cmd.multiple:
- if key not in cmd.keys:
- raise RuntimeError("Unexpected commands answer.")
- cmd.currentKey = key
- cmd.values[key] = [int(flags), cas]
- else:
- if cmd.key != key:
- raise RuntimeError("Unexpected commands answer.")
- cmd.flags = int(flags)
- cmd.cas = cas
- self.setRawMode()
-
-
- def cmd_STAT(self, line):
- """
- Reception of one stat line.
- """
- cmd = self._current[0]
- key, val = line.split(" ", 1)
- cmd.values[key] = val
-
-
- def cmd_VERSION(self, versionData):
- """
- Read version token.
- """
- self._current.popleft().success(versionData)
-
-
- def cmd_ERROR(self):
- """
-        A non-existent command has been sent.
- """
- log.err("Non-existent command sent.")
- cmd = self._current.popleft()
- cmd.fail(NoSuchCommand())
-
-
- def cmd_CLIENT_ERROR(self, errText):
- """
-        An invalid input has been sent.
- """
- log.err("Invalid input: %s" % (errText,))
- cmd = self._current.popleft()
- cmd.fail(ClientError(errText))
-
-
- def cmd_SERVER_ERROR(self, errText):
- """
- An error has happened server-side.
- """
- log.err("Server error: %s" % (errText,))
- cmd = self._current.popleft()
- cmd.fail(ServerError(errText))
-
-
- def cmd_DELETED(self):
- """
- A delete command has completed successfully.
- """
- self._current.popleft().success(True)
-
-
- def cmd_OK(self):
- """
- The last command has been completed.
- """
- self._current.popleft().success(True)
-
-
- def cmd_EXISTS(self):
- """
- A C{checkAndSet} update has failed.
- """
- self._current.popleft().success(False)
-
-
- def lineReceived(self, line):
- """
- Receive line commands from the server.
- """
- self.resetTimeout()
- token = line.split(" ", 1)[0]
- # First manage standard commands without space
- cmd = getattr(self, "cmd_%s" % (token,), None)
- if cmd is not None:
- args = line.split(" ", 1)[1:]
- if args:
- cmd(args[0])
- else:
- cmd()
- else:
- # Then manage commands with space in it
- line = line.replace(" ", "_")
- cmd = getattr(self, "cmd_%s" % (line,), None)
- if cmd is not None:
- cmd()
- else:
- # Increment/Decrement response
- cmd = self._current.popleft()
- val = int(line)
- cmd.success(val)
- if not self._current:
- # No pending request, remove timeout
- self.setTimeout(None)
-
-
- def increment(self, key, val=1):
- """
- Increment the value of C{key} by given value (default to 1).
- C{key} must be consistent with an int. Return the new value.
-
- @param key: the key to modify.
- @type key: C{str}
-
- @param val: the value to increment.
- @type val: C{int}
-
-        @return: a deferred which will be called back with the new value
- associated with the key (after the increment).
- @rtype: L{Deferred}
- """
- return self._incrdecr("incr", key, val)
-
-
- def decrement(self, key, val=1):
- """
- Decrement the value of C{key} by given value (default to 1).
- C{key} must be consistent with an int. Return the new value, coerced to
- 0 if negative.
-
- @param key: the key to modify.
- @type key: C{str}
-
- @param val: the value to decrement.
- @type val: C{int}
-
-        @return: a deferred which will be called back with the new value
- associated with the key (after the decrement).
- @rtype: L{Deferred}
- """
- return self._incrdecr("decr", key, val)
-
-
- def _incrdecr(self, cmd, key, val):
- """
- Internal wrapper for incr/decr.
- """
- if self._disconnected:
- return fail(RuntimeError("not connected"))
- if not isinstance(key, str):
- return fail(ClientError(
- "Invalid type for key: %s, expecting a string" % (type(key),)))
- if len(key) > self.MAX_KEY_LENGTH:
- return fail(ClientError("Key too long"))
- fullcmd = "%s %s %d" % (cmd, key, int(val))
- self.sendLine(fullcmd)
- cmdObj = Command(cmd, key=key)
- self._current.append(cmdObj)
- return cmdObj._deferred
-
-
- def replace(self, key, val, flags=0, expireTime=0):
- """
- Replace the given C{key}. It must already exist in the server.
-
- @param key: the key to replace.
- @type key: C{str}
-
- @param val: the new value associated with the key.
- @type val: C{str}
-
- @param flags: the flags to store with the key.
- @type flags: C{int}
-
- @param expireTime: if different from 0, the relative time in seconds
- when the key will be deleted from the store.
- @type expireTime: C{int}
-
- @return: a deferred that will fire with C{True} if the operation has
-            succeeded, and C{False} if the key didn't previously exist.
- @rtype: L{Deferred}
- """
- return self._set("replace", key, val, flags, expireTime, "")
-
-
- def add(self, key, val, flags=0, expireTime=0):
- """
- Add the given C{key}. It must not exist in the server.
-
- @param key: the key to add.
- @type key: C{str}
-
- @param val: the value associated with the key.
- @type val: C{str}
-
- @param flags: the flags to store with the key.
- @type flags: C{int}
-
- @param expireTime: if different from 0, the relative time in seconds
- when the key will be deleted from the store.
- @type expireTime: C{int}
-
- @return: a deferred that will fire with C{True} if the operation has
-            succeeded, and C{False} if the key already exists.
- @rtype: L{Deferred}
- """
- return self._set("add", key, val, flags, expireTime, "")
-
-
- def set(self, key, val, flags=0, expireTime=0):
- """
- Set the given C{key}.
-
- @param key: the key to set.
- @type key: C{str}
-
- @param val: the value associated with the key.
- @type val: C{str}
-
- @param flags: the flags to store with the key.
- @type flags: C{int}
-
- @param expireTime: if different from 0, the relative time in seconds
- when the key will be deleted from the store.
- @type expireTime: C{int}
-
- @return: a deferred that will fire with C{True} if the operation has
- succeeded.
- @rtype: L{Deferred}
- """
- return self._set("set", key, val, flags, expireTime, "")
-
-
- def checkAndSet(self, key, val, cas, flags=0, expireTime=0):
- """
- Change the content of C{key} only if the C{cas} value matches the
- current one associated with the key. Use this to store a value which
- hasn't been modified since last time you fetched it.
-
- @param key: The key to set.
- @type key: C{str}
-
- @param val: The value associated with the key.
- @type val: C{str}
-
- @param cas: Unique 64-bit value returned by previous call of C{get}.
- @type cas: C{str}
-
- @param flags: The flags to store with the key.
- @type flags: C{int}
-
- @param expireTime: If different from 0, the relative time in seconds
- when the key will be deleted from the store.
- @type expireTime: C{int}
-
- @return: A deferred that will fire with C{True} if the operation has
- succeeded, C{False} otherwise.
- @rtype: L{Deferred}
- """
- return self._set("cas", key, val, flags, expireTime, cas)
-
-
- def _set(self, cmd, key, val, flags, expireTime, cas):
- """
- Internal wrapper for setting values.
- """
- if self._disconnected:
- return fail(RuntimeError("not connected"))
- if not isinstance(key, str):
- return fail(ClientError(
- "Invalid type for key: %s, expecting a string" % (type(key),)))
- if len(key) > self.MAX_KEY_LENGTH:
- return fail(ClientError("Key too long"))
- if not isinstance(val, str):
- return fail(ClientError(
- "Invalid type for value: %s, expecting a string" %
- (type(val),)))
- if cas:
- cas = " " + cas
- length = len(val)
- fullcmd = "%s %s %d %d %d%s" % (
- cmd, key, flags, expireTime, length, cas)
- self.sendLine(fullcmd)
- self.sendLine(val)
- cmdObj = Command(cmd, key=key, flags=flags, length=length)
- self._current.append(cmdObj)
- return cmdObj._deferred
-
-
- def append(self, key, val):
- """
- Append given data to the value of an existing key.
-
- @param key: The key to modify.
- @type key: C{str}
-
- @param val: The value to append to the current value associated with
- the key.
- @type val: C{str}
-
- @return: A deferred that will fire with C{True} if the operation has
- succeeded, C{False} otherwise.
- @rtype: L{Deferred}
- """
-        # Even though the flags and expTime values are ignored, we have to pass them
- return self._set("append", key, val, 0, 0, "")
-
-
- def prepend(self, key, val):
- """
- Prepend given data to the value of an existing key.
-
- @param key: The key to modify.
- @type key: C{str}
-
- @param val: The value to prepend to the current value associated with
- the key.
- @type val: C{str}
-
- @return: A deferred that will fire with C{True} if the operation has
- succeeded, C{False} otherwise.
- @rtype: L{Deferred}
- """
-        # Even though the flags and expTime values are ignored, we have to pass them
- return self._set("prepend", key, val, 0, 0, "")
-
-
- def get(self, key, withIdentifier=False):
- """
- Get the given C{key}. It doesn't support multiple keys. If
-        C{withIdentifier} is set to C{True}, the command issued is a C{gets}, which
-        will return the current identifier associated with the value. This
-        identifier has to be used when issuing a C{checkAndSet} update later,
- using the corresponding method.
-
- @param key: The key to retrieve.
- @type key: C{str}
-
- @param withIdentifier: If set to C{True}, retrieve the current
- identifier along with the value and the flags.
- @type withIdentifier: C{bool}
-
- @return: A deferred that will fire with the tuple (flags, value) if
- C{withIdentifier} is C{False}, or (flags, cas identifier, value)
- if C{True}. If the server indicates there is no value
- associated with C{key}, the returned value will be C{None} and
- the returned flags will be C{0}.
- @rtype: L{Deferred}
- """
- return self._get([key], withIdentifier, False)
-
-
- def getMultiple(self, keys, withIdentifier=False):
- """
- Get the given list of C{keys}. If C{withIdentifier} is set to C{True},
-        the command issued is a C{gets}, which will return the identifiers
-        associated with each value. These identifiers have to be used when
-        issuing C{checkAndSet} updates later, using the corresponding method.
-
- @param keys: The keys to retrieve.
- @type keys: C{list} of C{str}
-
- @param withIdentifier: If set to C{True}, retrieve the identifiers
- along with the values and the flags.
- @type withIdentifier: C{bool}
-
- @return: A deferred that will fire with a dictionary with the elements
- of C{keys} as keys and the tuples (flags, value) as values if
- C{withIdentifier} is C{False}, or (flags, cas identifier, value) if
- C{True}. If the server indicates there is no value associated with
- C{key}, the returned values will be C{None} and the returned flags
- will be C{0}.
- @rtype: L{Deferred}
-
- @since: 9.0
- """
- return self._get(keys, withIdentifier, True)
-
- def _get(self, keys, withIdentifier, multiple):
- """
- Helper method for C{get} and C{getMultiple}.
- """
- if self._disconnected:
- return fail(RuntimeError("not connected"))
- for key in keys:
- if not isinstance(key, str):
- return fail(ClientError(
- "Invalid type for key: %s, expecting a string" % (type(key),)))
- if len(key) > self.MAX_KEY_LENGTH:
- return fail(ClientError("Key too long"))
- if withIdentifier:
- cmd = "gets"
- else:
- cmd = "get"
- fullcmd = "%s %s" % (cmd, " ".join(keys))
- self.sendLine(fullcmd)
- if multiple:
- values = dict([(key, (0, "", None)) for key in keys])
- cmdObj = Command(cmd, keys=keys, values=values, multiple=True)
- else:
- cmdObj = Command(cmd, key=keys[0], value=None, flags=0, cas="",
- multiple=False)
- self._current.append(cmdObj)
- return cmdObj._deferred
-
- def stats(self, arg=None):
- """
-        Get some stats from the server. They will be available as a dict.
-
- @param arg: An optional additional string which will be sent along
- with the I{stats} command. The interpretation of this value by
- the server is left undefined by the memcache protocol
- specification.
- @type arg: L{NoneType} or L{str}
-
- @return: a deferred that will fire with a C{dict} of the available
- statistics.
- @rtype: L{Deferred}
- """
- if arg:
- cmd = "stats " + arg
- else:
- cmd = "stats"
- if self._disconnected:
- return fail(RuntimeError("not connected"))
- self.sendLine(cmd)
- cmdObj = Command("stats", values={})
- self._current.append(cmdObj)
- return cmdObj._deferred
-
-
- def version(self):
- """
- Get the version of the server.
-
- @return: a deferred that will fire with the string value of the
- version.
- @rtype: L{Deferred}
- """
- if self._disconnected:
- return fail(RuntimeError("not connected"))
- self.sendLine("version")
- cmdObj = Command("version")
- self._current.append(cmdObj)
- return cmdObj._deferred
-
-
- def delete(self, key):
- """
- Delete an existing C{key}.
-
- @param key: the key to delete.
- @type key: C{str}
-
- @return: a deferred that will be called back with C{True} if the key
- was successfully deleted, or C{False} if not.
- @rtype: L{Deferred}
- """
- if self._disconnected:
- return fail(RuntimeError("not connected"))
- if not isinstance(key, str):
- return fail(ClientError(
- "Invalid type for key: %s, expecting a string" % (type(key),)))
- self.sendLine("delete %s" % key)
- cmdObj = Command("delete", key=key)
- self._current.append(cmdObj)
- return cmdObj._deferred
-
-
- def flushAll(self):
- """
- Flush all cached values.
-
- @return: a deferred that will be called back with C{True} when the
- operation has succeeded.
- @rtype: L{Deferred}
- """
- if self._disconnected:
- return fail(RuntimeError("not connected"))
- self.sendLine("flush_all")
- cmdObj = Command("flush_all")
- self._current.append(cmdObj)
- return cmdObj._deferred
-
-
-
-__all__ = ["MemCacheProtocol", "DEFAULT_PORT", "NoSuchCommand", "ClientError",
- "ServerError"]
diff --git a/lib/python2.7/site-packages/Twisted-12.2.0-py2.7-linux-x86_64.egg/twisted/protocols/mice/__init__.py b/lib/python2.7/site-packages/Twisted-12.2.0-py2.7-linux-x86_64.egg/twisted/protocols/mice/__init__.py
deleted file mode 100755
index fda89c58..00000000
--- a/lib/python2.7/site-packages/Twisted-12.2.0-py2.7-linux-x86_64.egg/twisted/protocols/mice/__init__.py
+++ /dev/null
@@ -1 +0,0 @@
-"""Mice Protocols."""
diff --git a/lib/python2.7/site-packages/Twisted-12.2.0-py2.7-linux-x86_64.egg/twisted/protocols/mice/mouseman.py b/lib/python2.7/site-packages/Twisted-12.2.0-py2.7-linux-x86_64.egg/twisted/protocols/mice/mouseman.py
deleted file mode 100755
index 4071b202..00000000
--- a/lib/python2.7/site-packages/Twisted-12.2.0-py2.7-linux-x86_64.egg/twisted/protocols/mice/mouseman.py
+++ /dev/null
@@ -1,127 +0,0 @@
-# Copyright (c) Twisted Matrix Laboratories.
-# See LICENSE for details.
-
-#
-"""Logictech MouseMan serial protocol.
-
-http://www.softnco.demon.co.uk/SerialMouse.txt
-"""
-
-from twisted.internet import protocol
-
-class MouseMan(protocol.Protocol):
- """
-
- Parser for Logitech MouseMan serial mouse protocol (compatible
- with Microsoft Serial Mouse).
-
- """
-
- state = 'initial'
-
- leftbutton=None
- rightbutton=None
- middlebutton=None
-
- leftold=None
- rightold=None
- middleold=None
-
- horiz=None
- vert=None
- horizold=None
- vertold=None
-
- def down_left(self):
- pass
-
- def up_left(self):
- pass
-
- def down_middle(self):
- pass
-
- def up_middle(self):
- pass
-
- def down_right(self):
- pass
-
- def up_right(self):
- pass
-
- def move(self, x, y):
- pass
-
- horiz=None
- vert=None
-
- def state_initial(self, byte):
- if byte & 1<<6:
- self.word1=byte
- self.leftbutton = byte & 1<<5
- self.rightbutton = byte & 1<<4
- return 'horiz'
- else:
- return 'initial'
-
- def state_horiz(self, byte):
- if byte & 1<<6:
- return self.state_initial(byte)
- else:
- x=(self.word1 & 0x03)<<6 | (byte & 0x3f)
- if x>=128:
- x=-256+x
- self.horiz = x
- return 'vert'
-
- def state_vert(self, byte):
- if byte & 1<<6:
- # short packet
- return self.state_initial(byte)
- else:
- x = (self.word1 & 0x0c)<<4 | (byte & 0x3f)
- if x>=128:
- x=-256+x
- self.vert = x
- self.snapshot()
- return 'maybemiddle'
-
- def state_maybemiddle(self, byte):
- if byte & 1<<6:
- self.snapshot()
- return self.state_initial(byte)
- else:
- self.middlebutton=byte & 1<<5
- self.snapshot()
- return 'initial'
-
- def snapshot(self):
- if self.leftbutton and not self.leftold:
- self.down_left()
- self.leftold=1
- if not self.leftbutton and self.leftold:
- self.up_left()
- self.leftold=0
-
- if self.middlebutton and not self.middleold:
- self.down_middle()
- self.middleold=1
- if not self.middlebutton and self.middleold:
- self.up_middle()
- self.middleold=0
-
- if self.rightbutton and not self.rightold:
- self.down_right()
- self.rightold=1
- if not self.rightbutton and self.rightold:
- self.up_right()
- self.rightold=0
-
- if self.horiz or self.vert:
- self.move(self.horiz, self.vert)
-
- def dataReceived(self, data):
- for c in data:
- byte = ord(c)
- self.state = getattr(self, 'state_'+self.state)(byte)
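
The MouseMan class above is a byte-level state machine whose button and movement hooks are no-ops by default; a minimal sketch of a subclass that reports events follows (LoggingMouse and the hand-fed packet bytes are illustrative; a real deployment would attach the protocol to a serial port transport):

    from twisted.protocols.mice import mouseman

    class LoggingMouse(mouseman.MouseMan):
        # The base class calls these hooks while decoding packets.
        def down_left(self):
            print 'left button pressed'
        def up_left(self):
            print 'left button released'
        def move(self, x, y):
            print 'moved by', x, y

    m = LoggingMouse()
    # One three-byte packet: left button down, horizontal delta 1, vertical delta 2.
    m.dataReceived('\x60\x01\x02')
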
diff --git a/lib/python2.7/site-packages/Twisted-12.2.0-py2.7-linux-x86_64.egg/twisted/protocols/pcp.py b/lib/python2.7/site-packages/Twisted-12.2.0-py2.7-linux-x86_64.egg/twisted/protocols/pcp.py
deleted file mode 100755
index 8970f901..00000000
--- a/lib/python2.7/site-packages/Twisted-12.2.0-py2.7-linux-x86_64.egg/twisted/protocols/pcp.py
+++ /dev/null
@@ -1,204 +0,0 @@
-# -*- test-case-name: twisted.test.test_pcp -*-
-# Copyright (c) Twisted Matrix Laboratories.
-# See LICENSE for details.
-
-"""
-Producer-Consumer Proxy.
-"""
-
-from zope.interface import implements
-
-from twisted.internet import interfaces
-
-
-class BasicProducerConsumerProxy:
- """
- I can act as a man in the middle between any Producer and Consumer.
-
- @ivar producer: the Producer I subscribe to.
- @type producer: L{IProducer<interfaces.IProducer>}
- @ivar consumer: the Consumer I publish to.
- @type consumer: L{IConsumer<interfaces.IConsumer>}
- @ivar paused: As a Producer, am I paused?
- @type paused: bool
- """
- implements(interfaces.IProducer, interfaces.IConsumer)
-
- consumer = None
- producer = None
- producerIsStreaming = None
- iAmStreaming = True
- outstandingPull = False
- paused = False
- stopped = False
-
- def __init__(self, consumer):
- self._buffer = []
- if consumer is not None:
- self.consumer = consumer
- consumer.registerProducer(self, self.iAmStreaming)
-
- # Producer methods:
-
- def pauseProducing(self):
- self.paused = True
- if self.producer:
- self.producer.pauseProducing()
-
- def resumeProducing(self):
- self.paused = False
- if self._buffer:
- # TODO: Check to see if consumer supports writeSeq.
- self.consumer.write(''.join(self._buffer))
- self._buffer[:] = []
- else:
- if not self.iAmStreaming:
- self.outstandingPull = True
-
- if self.producer is not None:
- self.producer.resumeProducing()
-
- def stopProducing(self):
- if self.producer is not None:
- self.producer.stopProducing()
- if self.consumer is not None:
- del self.consumer
-
- # Consumer methods:
-
- def write(self, data):
- if self.paused or (not self.iAmStreaming and not self.outstandingPull):
- # We could use that fifo queue here.
- self._buffer.append(data)
-
- elif self.consumer is not None:
- self.consumer.write(data)
- self.outstandingPull = False
-
- def finish(self):
- if self.consumer is not None:
- self.consumer.finish()
- self.unregisterProducer()
-
- def registerProducer(self, producer, streaming):
- self.producer = producer
- self.producerIsStreaming = streaming
-
- def unregisterProducer(self):
- if self.producer is not None:
- del self.producer
- del self.producerIsStreaming
- if self.consumer:
- self.consumer.unregisterProducer()
-
- def __repr__(self):
- return '<%s@%x around %s>' % (self.__class__, id(self), self.consumer)
-
-
-class ProducerConsumerProxy(BasicProducerConsumerProxy):
- """ProducerConsumerProxy with a finite buffer.
-
- When my buffer fills up, I have my parent Producer pause until my buffer
- has room in it again.
- """
- # Copies much from abstract.FileDescriptor
- bufferSize = 2**2**2**2
-
- producerPaused = False
- unregistered = False
-
- def pauseProducing(self):
- # Does *not* call up to ProducerConsumerProxy to relay the pause
- # message through to my parent Producer.
- self.paused = True
-
- def resumeProducing(self):
- self.paused = False
- if self._buffer:
- data = ''.join(self._buffer)
- bytesSent = self._writeSomeData(data)
- if bytesSent < len(data):
- unsent = data[bytesSent:]
- assert not self.iAmStreaming, (
- "Streaming producer did not write all its data.")
- self._buffer[:] = [unsent]
- else:
- self._buffer[:] = []
- else:
- bytesSent = 0
-
- if (self.unregistered and bytesSent and not self._buffer and
- self.consumer is not None):
- self.consumer.unregisterProducer()
-
- if not self.iAmStreaming:
- self.outstandingPull = not bytesSent
-
- if self.producer is not None:
- bytesBuffered = sum([len(s) for s in self._buffer])
- # TODO: You can see here the potential for high and low
- # watermarks, where bufferSize would be the high mark when we
- # ask the upstream producer to pause, and we wouldn't have
- # it resume again until it hit the low mark. Or if producer
- # is Pull, maybe we'd like to pull from it as much as necessary
- # to keep our buffer full to the low mark, so we're never caught
- # without something to send.
- if self.producerPaused and (bytesBuffered < self.bufferSize):
- # Now that our buffer is empty,
- self.producerPaused = False
- self.producer.resumeProducing()
- elif self.outstandingPull:
- # I did not have any data to write in response to a pull,
- # so I'd better pull some myself.
- self.producer.resumeProducing()
-
- def write(self, data):
- if self.paused or (not self.iAmStreaming and not self.outstandingPull):
- # We could use that fifo queue here.
- self._buffer.append(data)
-
- elif self.consumer is not None:
- assert not self._buffer, (
- "Writing fresh data to consumer before my buffer is empty!")
- # I'm going to use _writeSomeData here so that there is only one
- # path to self.consumer.write. But it doesn't actually make sense,
- # if I am streaming, for some data to not be all data. But maybe I
- # am not streaming, but I am writing here anyway, because there was
- # an earlier request for data which was not answered.
- bytesSent = self._writeSomeData(data)
- self.outstandingPull = False
- if not bytesSent == len(data):
- assert not self.iAmStreaming, (
- "Streaming producer did not write all its data.")
- self._buffer.append(data[bytesSent:])
-
- if (self.producer is not None) and self.producerIsStreaming:
- bytesBuffered = sum([len(s) for s in self._buffer])
- if bytesBuffered >= self.bufferSize:
-
- self.producer.pauseProducing()
- self.producerPaused = True
-
- def registerProducer(self, producer, streaming):
- self.unregistered = False
- BasicProducerConsumerProxy.registerProducer(self, producer, streaming)
- if not streaming:
- producer.resumeProducing()
-
- def unregisterProducer(self):
- if self.producer is not None:
- del self.producer
- del self.producerIsStreaming
- self.unregistered = True
- if self.consumer and not self._buffer:
- self.consumer.unregisterProducer()
-
- def _writeSomeData(self, data):
- """Write as much of this data as possible.
-
- @returns: The number of bytes written.
- """
- if self.consumer is None:
- return 0
- self.consumer.write(data)
- return len(data)
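
BasicProducerConsumerProxy and ProducerConsumerProxy sit between an IProducer and an IConsumer, buffering writes while paused and flushing them on resume; below is a minimal sketch against a toy consumer (ListConsumer is illustrative, not part of the module):

    from twisted.protocols import pcp

    class ListConsumer(object):
        # Toy IConsumer that just collects whatever it is given.
        def __init__(self):
            self.data = []
            self.producer = None
        def registerProducer(self, producer, streaming):
            self.producer = producer
        def unregisterProducer(self):
            self.producer = None
        def write(self, data):
            self.data.append(data)

    sink = ListConsumer()
    proxy = pcp.ProducerConsumerProxy(sink)
    proxy.write("hello ")     # forwarded straight to the consumer
    proxy.pauseProducing()    # downstream asks the proxy to stop
    proxy.write("world")      # buffered while paused
    proxy.resumeProducing()   # flushes the buffer; sink.data == ["hello ", "world"]
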
diff --git a/lib/python2.7/site-packages/Twisted-12.2.0-py2.7-linux-x86_64.egg/twisted/protocols/policies.py b/lib/python2.7/site-packages/Twisted-12.2.0-py2.7-linux-x86_64.egg/twisted/protocols/policies.py
deleted file mode 100755
index 83a3ec7d..00000000
--- a/lib/python2.7/site-packages/Twisted-12.2.0-py2.7-linux-x86_64.egg/twisted/protocols/policies.py
+++ /dev/null
@@ -1,725 +0,0 @@
-# -*- test-case-name: twisted.test.test_policies -*-
-# Copyright (c) Twisted Matrix Laboratories.
-# See LICENSE for details.
-
-"""
-Resource limiting policies.
-
-@seealso: See also L{twisted.protocols.htb} for rate limiting.
-"""
-
-# system imports
-import sys, operator
-
-from zope.interface import directlyProvides, providedBy
-
-# twisted imports
-from twisted.internet.protocol import ServerFactory, Protocol, ClientFactory
-from twisted.internet import error
-from twisted.internet.interfaces import ILoggingContext
-from twisted.python import log
-
-
-def _wrappedLogPrefix(wrapper, wrapped):
- """
- Compute a log prefix for a wrapper and the object it wraps.
-
- @rtype: C{str}
- """
- if ILoggingContext.providedBy(wrapped):
- logPrefix = wrapped.logPrefix()
- else:
- logPrefix = wrapped.__class__.__name__
- return "%s (%s)" % (logPrefix, wrapper.__class__.__name__)
-
-
-
-class ProtocolWrapper(Protocol):
- """
- Wraps protocol instances and acts as their transport as well.
-
- @ivar wrappedProtocol: An L{IProtocol<twisted.internet.interfaces.IProtocol>}
- provider to which L{IProtocol<twisted.internet.interfaces.IProtocol>}
- method calls onto this L{ProtocolWrapper} will be proxied.
-
- @ivar factory: The L{WrappingFactory} which created this
- L{ProtocolWrapper}.
- """
-
- disconnecting = 0
-
- def __init__(self, factory, wrappedProtocol):
- self.wrappedProtocol = wrappedProtocol
- self.factory = factory
-
-
- def logPrefix(self):
- """
- Use a customized log prefix mentioning both the wrapped protocol and
- the current one.
- """
- return _wrappedLogPrefix(self, self.wrappedProtocol)
-
-
- def makeConnection(self, transport):
- """
- When a connection is made, register this wrapper with its factory,
- save the real transport, and connect the wrapped protocol to this
- L{ProtocolWrapper} to intercept any transport calls it makes.
- """
- directlyProvides(self, providedBy(transport))
- Protocol.makeConnection(self, transport)
- self.factory.registerProtocol(self)
- self.wrappedProtocol.makeConnection(self)
-
-
- # Transport relaying
-
- def write(self, data):
- self.transport.write(data)
-
-
- def writeSequence(self, data):
- self.transport.writeSequence(data)
-
-
- def loseConnection(self):
- self.disconnecting = 1
- self.transport.loseConnection()
-
-
- def getPeer(self):
- return self.transport.getPeer()
-
-
- def getHost(self):
- return self.transport.getHost()
-
-
- def registerProducer(self, producer, streaming):
- self.transport.registerProducer(producer, streaming)
-
-
- def unregisterProducer(self):
- self.transport.unregisterProducer()
-
-
- def stopConsuming(self):
- self.transport.stopConsuming()
-
-
- def __getattr__(self, name):
- return getattr(self.transport, name)
-
-
- # Protocol relaying
-
- def dataReceived(self, data):
- self.wrappedProtocol.dataReceived(data)
-
-
- def connectionLost(self, reason):
- self.factory.unregisterProtocol(self)
- self.wrappedProtocol.connectionLost(reason)
-
-
-
-class WrappingFactory(ClientFactory):
- """
- Wraps a factory and its protocols, and keeps track of them.
- """
-
- protocol = ProtocolWrapper
-
- def __init__(self, wrappedFactory):
- self.wrappedFactory = wrappedFactory
- self.protocols = {}
-
-
- def logPrefix(self):
- """
- Generate a log prefix mentioning both the wrapped factory and this one.
- """
- return _wrappedLogPrefix(self, self.wrappedFactory)
-
-
- def doStart(self):
- self.wrappedFactory.doStart()
- ClientFactory.doStart(self)
-
-
- def doStop(self):
- self.wrappedFactory.doStop()
- ClientFactory.doStop(self)
-
-
- def startedConnecting(self, connector):
- self.wrappedFactory.startedConnecting(connector)
-
-
- def clientConnectionFailed(self, connector, reason):
- self.wrappedFactory.clientConnectionFailed(connector, reason)
-
-
- def clientConnectionLost(self, connector, reason):
- self.wrappedFactory.clientConnectionLost(connector, reason)
-
-
- def buildProtocol(self, addr):
- return self.protocol(self, self.wrappedFactory.buildProtocol(addr))
-
-
- def registerProtocol(self, p):
- """
- Called by protocol to register itself.
- """
- self.protocols[p] = 1
-
-
- def unregisterProtocol(self, p):
- """
- Called by protocols when they go away.
- """
- del self.protocols[p]
-
-
-
-class ThrottlingProtocol(ProtocolWrapper):
- """Protocol for ThrottlingFactory."""
-
- # wrap API for tracking bandwidth
-
- def write(self, data):
- self.factory.registerWritten(len(data))
- ProtocolWrapper.write(self, data)
-
- def writeSequence(self, seq):
- self.factory.registerWritten(reduce(operator.add, map(len, seq)))
- ProtocolWrapper.writeSequence(self, seq)
-
- def dataReceived(self, data):
- self.factory.registerRead(len(data))
- ProtocolWrapper.dataReceived(self, data)
-
- def registerProducer(self, producer, streaming):
- self.producer = producer
- ProtocolWrapper.registerProducer(self, producer, streaming)
-
- def unregisterProducer(self):
- del self.producer
- ProtocolWrapper.unregisterProducer(self)
-
-
- def throttleReads(self):
- self.transport.pauseProducing()
-
- def unthrottleReads(self):
- self.transport.resumeProducing()
-
- def throttleWrites(self):
- if hasattr(self, "producer"):
- self.producer.pauseProducing()
-
- def unthrottleWrites(self):
- if hasattr(self, "producer"):
- self.producer.resumeProducing()
-
-
-class ThrottlingFactory(WrappingFactory):
- """
- Throttles bandwidth and number of connections.
-
- Write bandwidth will only be throttled if there is a producer
- registered.
- """
-
- protocol = ThrottlingProtocol
-
- def __init__(self, wrappedFactory, maxConnectionCount=sys.maxint,
- readLimit=None, writeLimit=None):
- WrappingFactory.__init__(self, wrappedFactory)
- self.connectionCount = 0
- self.maxConnectionCount = maxConnectionCount
- self.readLimit = readLimit # max bytes we should read per second
- self.writeLimit = writeLimit # max bytes we should write per second
- self.readThisSecond = 0
- self.writtenThisSecond = 0
- self.unthrottleReadsID = None
- self.checkReadBandwidthID = None
- self.unthrottleWritesID = None
- self.checkWriteBandwidthID = None
-
-
- def callLater(self, period, func):
- """
- Wrapper around L{reactor.callLater} for test purposes.
- """
- from twisted.internet import reactor
- return reactor.callLater(period, func)
-
-
- def registerWritten(self, length):
- """
- Called by protocol to tell us more bytes were written.
- """
- self.writtenThisSecond += length
-
-
- def registerRead(self, length):
- """
- Called by protocol to tell us more bytes were read.
- """
- self.readThisSecond += length
-
-
- def checkReadBandwidth(self):
- """
- Checks if we've passed bandwidth limits.
- """
- if self.readThisSecond > self.readLimit:
- self.throttleReads()
- throttleTime = (float(self.readThisSecond) / self.readLimit) - 1.0
- self.unthrottleReadsID = self.callLater(throttleTime,
- self.unthrottleReads)
- self.readThisSecond = 0
- self.checkReadBandwidthID = self.callLater(1, self.checkReadBandwidth)
-
-
- def checkWriteBandwidth(self):
- if self.writtenThisSecond > self.writeLimit:
- self.throttleWrites()
- throttleTime = (float(self.writtenThisSecond) / self.writeLimit) - 1.0
- self.unthrottleWritesID = self.callLater(throttleTime,
- self.unthrottleWrites)
- # reset for next round
- self.writtenThisSecond = 0
- self.checkWriteBandwidthID = self.callLater(1, self.checkWriteBandwidth)
-
-
- def throttleReads(self):
- """
- Throttle reads on all protocols.
- """
- log.msg("Throttling reads on %s" % self)
- for p in self.protocols.keys():
- p.throttleReads()
-
-
- def unthrottleReads(self):
- """
- Stop throttling reads on all protocols.
- """
- self.unthrottleReadsID = None
- log.msg("Stopped throttling reads on %s" % self)
- for p in self.protocols.keys():
- p.unthrottleReads()
-
-
- def throttleWrites(self):
- """
- Throttle writes on all protocols.
- """
- log.msg("Throttling writes on %s" % self)
- for p in self.protocols.keys():
- p.throttleWrites()
-
-
- def unthrottleWrites(self):
- """
- Stop throttling writes on all protocols.
- """
- self.unthrottleWritesID = None
- log.msg("Stopped throttling writes on %s" % self)
- for p in self.protocols.keys():
- p.unthrottleWrites()
-
-
- def buildProtocol(self, addr):
- if self.connectionCount == 0:
- if self.readLimit is not None:
- self.checkReadBandwidth()
- if self.writeLimit is not None:
- self.checkWriteBandwidth()
-
- if self.connectionCount < self.maxConnectionCount:
- self.connectionCount += 1
- return WrappingFactory.buildProtocol(self, addr)
- else:
- log.msg("Max connection count reached!")
- return None
-
-
- def unregisterProtocol(self, p):
- WrappingFactory.unregisterProtocol(self, p)
- self.connectionCount -= 1
- if self.connectionCount == 0:
- if self.unthrottleReadsID is not None:
- self.unthrottleReadsID.cancel()
- if self.checkReadBandwidthID is not None:
- self.checkReadBandwidthID.cancel()
- if self.unthrottleWritesID is not None:
- self.unthrottleWritesID.cancel()
- if self.checkWriteBandwidthID is not None:
- self.checkWriteBandwidthID.cancel()
-
-
-
-class SpewingProtocol(ProtocolWrapper):
- def dataReceived(self, data):
- log.msg("Received: %r" % data)
- ProtocolWrapper.dataReceived(self,data)
-
- def write(self, data):
- log.msg("Sending: %r" % data)
- ProtocolWrapper.write(self,data)
-
-
-
-class SpewingFactory(WrappingFactory):
- protocol = SpewingProtocol
-
-
-
-class LimitConnectionsByPeer(WrappingFactory):
-
- maxConnectionsPerPeer = 5
-
- def startFactory(self):
- self.peerConnections = {}
-
- def buildProtocol(self, addr):
- peerHost = addr[0]
- connectionCount = self.peerConnections.get(peerHost, 0)
- if connectionCount >= self.maxConnectionsPerPeer:
- return None
- self.peerConnections[peerHost] = connectionCount + 1
- return WrappingFactory.buildProtocol(self, addr)
-
- def unregisterProtocol(self, p):
- peerHost = p.getPeer()[1]
- self.peerConnections[peerHost] -= 1
- if self.peerConnections[peerHost] == 0:
- del self.peerConnections[peerHost]
-
-
-class LimitTotalConnectionsFactory(ServerFactory):
- """
- Factory that limits the number of simultaneous connections.
-
- @type connectionCount: C{int}
- @ivar connectionCount: number of current connections.
- @type connectionLimit: C{int} or C{None}
- @cvar connectionLimit: maximum number of connections.
- @type overflowProtocol: L{Protocol} or C{None}
- @cvar overflowProtocol: Protocol to use for new connections when
- connectionLimit is exceeded. If C{None} (the default value), excess
- connections will be closed immediately.
- """
- connectionCount = 0
- connectionLimit = None
- overflowProtocol = None
-
- def buildProtocol(self, addr):
- if (self.connectionLimit is None or
- self.connectionCount < self.connectionLimit):
- # Build the normal protocol
- wrappedProtocol = self.protocol()
- elif self.overflowProtocol is None:
- # Just drop the connection
- return None
- else:
- # Too many connections, so build the overflow protocol
- wrappedProtocol = self.overflowProtocol()
-
- wrappedProtocol.factory = self
- protocol = ProtocolWrapper(self, wrappedProtocol)
- self.connectionCount += 1
- return protocol
-
- def registerProtocol(self, p):
- pass
-
- def unregisterProtocol(self, p):
- self.connectionCount -= 1
-
-
-
-class TimeoutProtocol(ProtocolWrapper):
- """
- Protocol that automatically disconnects when the connection is idle.
- """
-
- def __init__(self, factory, wrappedProtocol, timeoutPeriod):
- """
- Constructor.
-
- @param factory: An L{IFactory}.
- @param wrappedProtocol: A L{Protocol} to wrap.
- @param timeoutPeriod: Number of seconds to wait for activity before
- timing out.
- """
- ProtocolWrapper.__init__(self, factory, wrappedProtocol)
- self.timeoutCall = None
- self.setTimeout(timeoutPeriod)
-
-
- def setTimeout(self, timeoutPeriod=None):
- """
- Set a timeout.
-
- This will cancel any existing timeouts.
-
- @param timeoutPeriod: If not C{None}, change the timeout period.
- Otherwise, use the existing value.
- """
- self.cancelTimeout()
- if timeoutPeriod is not None:
- self.timeoutPeriod = timeoutPeriod
- self.timeoutCall = self.factory.callLater(self.timeoutPeriod, self.timeoutFunc)
-
-
- def cancelTimeout(self):
- """
- Cancel the timeout.
-
- If the timeout was already cancelled, this does nothing.
- """
- if self.timeoutCall:
- try:
- self.timeoutCall.cancel()
- except error.AlreadyCalled:
- pass
- self.timeoutCall = None
-
-
- def resetTimeout(self):
- """
- Reset the timeout, usually because some activity just happened.
- """
- if self.timeoutCall:
- self.timeoutCall.reset(self.timeoutPeriod)
-
-
- def write(self, data):
- self.resetTimeout()
- ProtocolWrapper.write(self, data)
-
-
- def writeSequence(self, seq):
- self.resetTimeout()
- ProtocolWrapper.writeSequence(self, seq)
-
-
- def dataReceived(self, data):
- self.resetTimeout()
- ProtocolWrapper.dataReceived(self, data)
-
-
- def connectionLost(self, reason):
- self.cancelTimeout()
- ProtocolWrapper.connectionLost(self, reason)
-
-
- def timeoutFunc(self):
- """
- This method is called when the timeout is triggered.
-
- By default it calls L{loseConnection}. Override this if you want
- something else to happen.
- """
- self.loseConnection()
-
-
-
-class TimeoutFactory(WrappingFactory):
- """
- Factory for L{TimeoutProtocol}.
- """
- protocol = TimeoutProtocol
-
-
- def __init__(self, wrappedFactory, timeoutPeriod=30*60):
- self.timeoutPeriod = timeoutPeriod
- WrappingFactory.__init__(self, wrappedFactory)
-
-
- def buildProtocol(self, addr):
- return self.protocol(self, self.wrappedFactory.buildProtocol(addr),
- timeoutPeriod=self.timeoutPeriod)
-
-
- def callLater(self, period, func):
- """
- Wrapper around L{reactor.callLater} for test purposes.
- """
- from twisted.internet import reactor
- return reactor.callLater(period, func)
-
-
-
-class TrafficLoggingProtocol(ProtocolWrapper):
-
- def __init__(self, factory, wrappedProtocol, logfile, lengthLimit=None,
- number=0):
- """
- @param factory: factory which created this protocol.
- @type factory: C{protocol.Factory}.
- @param wrappedProtocol: the underlying protocol.
- @type wrappedProtocol: C{protocol.Protocol}.
- @param logfile: file opened for writing used to write log messages.
- @type logfile: C{file}
- @param lengthLimit: maximum size of the received data that is logged.
- @type lengthLimit: C{int}
- @param number: identifier of the connection.
- @type number: C{int}.
- """
- ProtocolWrapper.__init__(self, factory, wrappedProtocol)
- self.logfile = logfile
- self.lengthLimit = lengthLimit
- self._number = number
-
-
- def _log(self, line):
- self.logfile.write(line + '\n')
- self.logfile.flush()
-
-
- def _mungeData(self, data):
- if self.lengthLimit and len(data) > self.lengthLimit:
- data = data[:self.lengthLimit - 12] + '<... elided>'
- return data
-
-
- # IProtocol
- def connectionMade(self):
- self._log('*')
- return ProtocolWrapper.connectionMade(self)
-
-
- def dataReceived(self, data):
- self._log('C %d: %r' % (self._number, self._mungeData(data)))
- return ProtocolWrapper.dataReceived(self, data)
-
-
- def connectionLost(self, reason):
- self._log('C %d: %r' % (self._number, reason))
- return ProtocolWrapper.connectionLost(self, reason)
-
-
- # ITransport
- def write(self, data):
- self._log('S %d: %r' % (self._number, self._mungeData(data)))
- return ProtocolWrapper.write(self, data)
-
-
- def writeSequence(self, iovec):
- self._log('SV %d: %r' % (self._number, [self._mungeData(d) for d in iovec]))
- return ProtocolWrapper.writeSequence(self, iovec)
-
-
- def loseConnection(self):
- self._log('S %d: *' % (self._number,))
- return ProtocolWrapper.loseConnection(self)
-
-
-
-class TrafficLoggingFactory(WrappingFactory):
- protocol = TrafficLoggingProtocol
-
- _counter = 0
-
- def __init__(self, wrappedFactory, logfilePrefix, lengthLimit=None):
- self.logfilePrefix = logfilePrefix
- self.lengthLimit = lengthLimit
- WrappingFactory.__init__(self, wrappedFactory)
-
-
- def open(self, name):
- return file(name, 'w')
-
-
- def buildProtocol(self, addr):
- self._counter += 1
- logfile = self.open(self.logfilePrefix + '-' + str(self._counter))
- return self.protocol(self, self.wrappedFactory.buildProtocol(addr),
- logfile, self.lengthLimit, self._counter)
-
-
- def resetCounter(self):
- """
- Reset the value of the counter used to identify connections.
- """
- self._counter = 0
-
-
-
-class TimeoutMixin:
- """
- Mixin for protocols which wish to timeout connections.
-
- Protocols that mix this in have a single timeout, set using L{setTimeout}.
- When the timeout is hit, L{timeoutConnection} is called, which, by
- default, closes the connection.
-
- @cvar timeOut: The number of seconds after which to timeout the connection.
- """
- timeOut = None
-
- __timeoutCall = None
-
- def callLater(self, period, func):
- """
- Wrapper around L{reactor.callLater} for test purposes.
- """
- from twisted.internet import reactor
- return reactor.callLater(period, func)
-
-
- def resetTimeout(self):
- """
- Reset the timeout count down.
-
- If the connection has already timed out, then do nothing. If the
- timeout has been cancelled (probably using C{setTimeout(None)}), also
- do nothing.
-
- It's often a good idea to call this when the protocol has received
- some meaningful input from the other end of the connection. "I've got
- some data, they're still there, reset the timeout".
- """
- if self.__timeoutCall is not None and self.timeOut is not None:
- self.__timeoutCall.reset(self.timeOut)
-
- def setTimeout(self, period):
- """
- Change the timeout period
-
- @type period: C{int} or C{NoneType}
- @param period: The period, in seconds, to change the timeout to, or
- C{None} to disable the timeout.
- """
- prev = self.timeOut
- self.timeOut = period
-
- if self.__timeoutCall is not None:
- if period is None:
- self.__timeoutCall.cancel()
- self.__timeoutCall = None
- else:
- self.__timeoutCall.reset(period)
- elif period is not None:
- self.__timeoutCall = self.callLater(period, self.__timedOut)
-
- return prev
-
- def __timedOut(self):
- self.__timeoutCall = None
- self.timeoutConnection()
-
- def timeoutConnection(self):
- """
- Called when the connection times out.
-
- Override to define behavior other than dropping the connection.
- """
- self.transport.loseConnection()
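
For context, the factories deleted above (ThrottlingFactory, TimeoutFactory, and the other WrappingFactory subclasses) compose around an ordinary factory rather than replace it. Below is a minimal usage sketch against the API shown in the removed policies.py; the Echo protocol and the port number are illustrative placeholders, not part of the removed module.

from twisted.internet import protocol, reactor
from twisted.protocols import policies

class Echo(protocol.Protocol):
    def dataReceived(self, data):
        # echo whatever the client sends straight back
        self.transport.write(data)

factory = protocol.ServerFactory()
factory.protocol = Echo

# At most 10 concurrent connections, roughly 4 KiB/s in each direction
# (write throttling only takes effect once a producer is registered).
throttled = policies.ThrottlingFactory(factory, maxConnectionCount=10,
                                       readLimit=4096, writeLimit=4096)

# Drop connections that stay idle for more than 60 seconds.
guarded = policies.TimeoutFactory(throttled, timeoutPeriod=60)

reactor.listenTCP(8007, guarded)
reactor.run()
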
diff --git a/lib/python2.7/site-packages/Twisted-12.2.0-py2.7-linux-x86_64.egg/twisted/protocols/portforward.py b/lib/python2.7/site-packages/Twisted-12.2.0-py2.7-linux-x86_64.egg/twisted/protocols/portforward.py
deleted file mode 100755
index 626d5aaf..00000000
--- a/lib/python2.7/site-packages/Twisted-12.2.0-py2.7-linux-x86_64.egg/twisted/protocols/portforward.py
+++ /dev/null
@@ -1,87 +0,0 @@
-
-# Copyright (c) Twisted Matrix Laboratories.
-# See LICENSE for details.
-
-"""
-A simple port forwarder.
-"""
-
-# Twisted imports
-from twisted.internet import protocol
-from twisted.python import log
-
-class Proxy(protocol.Protocol):
- noisy = True
-
- peer = None
-
- def setPeer(self, peer):
- self.peer = peer
-
- def connectionLost(self, reason):
- if self.peer is not None:
- self.peer.transport.loseConnection()
- self.peer = None
- elif self.noisy:
- log.msg("Unable to connect to peer: %s" % (reason,))
-
- def dataReceived(self, data):
- self.peer.transport.write(data)
-
-class ProxyClient(Proxy):
- def connectionMade(self):
- self.peer.setPeer(self)
-
- # Wire this and the peer transport together to enable
- # flow control (this stops connections from filling
- # this proxy memory when one side produces data at a
- # higher rate than the other can consume).
- self.transport.registerProducer(self.peer.transport, True)
- self.peer.transport.registerProducer(self.transport, True)
-
- # We're connected, everybody can read to their heart's content.
- self.peer.transport.resumeProducing()
-
-class ProxyClientFactory(protocol.ClientFactory):
-
- protocol = ProxyClient
-
- def setServer(self, server):
- self.server = server
-
- def buildProtocol(self, *args, **kw):
- prot = protocol.ClientFactory.buildProtocol(self, *args, **kw)
- prot.setPeer(self.server)
- return prot
-
- def clientConnectionFailed(self, connector, reason):
- self.server.transport.loseConnection()
-
-
-class ProxyServer(Proxy):
-
- clientProtocolFactory = ProxyClientFactory
- reactor = None
-
- def connectionMade(self):
- # Don't read anything from the connecting client until we have
- # somewhere to send it to.
- self.transport.pauseProducing()
-
- client = self.clientProtocolFactory()
- client.setServer(self)
-
- if self.reactor is None:
- from twisted.internet import reactor
- self.reactor = reactor
- self.reactor.connectTCP(self.factory.host, self.factory.port, client)
-
-
-class ProxyFactory(protocol.Factory):
- """Factory for port forwarder."""
-
- protocol = ProxyServer
-
- def __init__(self, host, port):
- self.host = host
- self.port = port
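
For context, a minimal forwarding sketch using the ProxyFactory removed above; the listening port and backend address are illustrative placeholders.

from twisted.internet import reactor
from twisted.protocols import portforward

# Accept connections on local port 8080 and relay them to localhost:80;
# ProxyClient registers each transport as the other's producer so neither
# side can flood the proxy's buffers.
reactor.listenTCP(8080, portforward.ProxyFactory('localhost', 80))
reactor.run()
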
diff --git a/lib/python2.7/site-packages/Twisted-12.2.0-py2.7-linux-x86_64.egg/twisted/protocols/postfix.py b/lib/python2.7/site-packages/Twisted-12.2.0-py2.7-linux-x86_64.egg/twisted/protocols/postfix.py
deleted file mode 100755
index 7a2079d7..00000000
--- a/lib/python2.7/site-packages/Twisted-12.2.0-py2.7-linux-x86_64.egg/twisted/protocols/postfix.py
+++ /dev/null
@@ -1,112 +0,0 @@
-# -*- test-case-name: twisted.test.test_postfix -*-
-# Copyright (c) Twisted Matrix Laboratories.
-# See LICENSE for details.
-
-"""
-Postfix mail transport agent related protocols.
-"""
-
-import sys
-import UserDict
-import urllib
-
-from twisted.protocols import basic
-from twisted.protocols import policies
-from twisted.internet import protocol, defer
-from twisted.python import log
-
-# urllib's quote functions just happen to match
-# the postfix semantics.
-def quote(s):
- return urllib.quote(s)
-
-def unquote(s):
- return urllib.unquote(s)
-
-class PostfixTCPMapServer(basic.LineReceiver, policies.TimeoutMixin):
- """Postfix mail transport agent TCP map protocol implementation.
-
- Receives requests for data matching a given key via lineReceived,
- asks its factory for the data with self.factory.get(key), and
- returns the data to the requester. None means no entry was found.
-
- You can use postfix's postmap to test the map service::
-
- /usr/sbin/postmap -q KEY tcp:localhost:4242
-
- """
-
- timeout = 600
- delimiter = '\n'
-
- def connectionMade(self):
- self.setTimeout(self.timeout)
-
- def sendCode(self, code, message=''):
- "Send an SMTP-like code with a message."
- self.sendLine('%3.3d %s' % (code, message or ''))
-
- def lineReceived(self, line):
- self.resetTimeout()
- try:
- request, params = line.split(None, 1)
- except ValueError:
- request = line
- params = None
- try:
- f = getattr(self, 'do_' + request)
- except AttributeError:
- self.sendCode(400, 'unknown command')
- else:
- try:
- f(params)
- except:
- self.sendCode(400, 'Command %r failed: %s.' % (request, sys.exc_info()[1]))
-
- def do_get(self, key):
- if key is None:
- self.sendCode(400, 'Command %r takes 1 parameter.' % 'get')
- else:
- d = defer.maybeDeferred(self.factory.get, key)
- d.addCallbacks(self._cbGot, self._cbNot)
- d.addErrback(log.err)
-
- def _cbNot(self, fail):
- self.sendCode(400, fail.getErrorMessage())
-
- def _cbGot(self, value):
- if value is None:
- self.sendCode(500)
- else:
- self.sendCode(200, quote(value))
-
- def do_put(self, keyAndValue):
- if keyAndValue is None:
- self.sendCode(400, 'Command %r takes 2 parameters.' % 'put')
- else:
- try:
- key, value = keyAndValue.split(None, 1)
- except ValueError:
- self.sendCode(400, 'Command %r takes 2 parameters.' % 'put')
- else:
- self.sendCode(500, 'put is not implemented yet.')
-
-
-class PostfixTCPMapDictServerFactory(protocol.ServerFactory,
- UserDict.UserDict):
- """An in-memory dictionary factory for PostfixTCPMapServer."""
-
- protocol = PostfixTCPMapServer
-
-class PostfixTCPMapDeferringDictServerFactory(protocol.ServerFactory):
- """An in-memory dictionary factory for PostfixTCPMapServer."""
-
- protocol = PostfixTCPMapServer
-
- def __init__(self, data=None):
- self.data = {}
- if data is not None:
- self.data.update(data)
-
- def get(self, key):
- return defer.succeed(self.data.get(key))
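
For context, a minimal sketch serving the deferring dictionary factory removed above, which postfix can then query with /usr/sbin/postmap -q KEY tcp:localhost:4242; the table contents and port are illustrative placeholders.

from twisted.internet import reactor
from twisted.protocols import postfix

# Lookups return deferreds; a missing key results in a bare 500 reply.
factory = postfix.PostfixTCPMapDeferringDictServerFactory(
    {'alice@example.com': 'OK', 'bob@example.com': 'REJECT'})
reactor.listenTCP(4242, factory)
reactor.run()
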
diff --git a/lib/python2.7/site-packages/Twisted-12.2.0-py2.7-linux-x86_64.egg/twisted/protocols/shoutcast.py b/lib/python2.7/site-packages/Twisted-12.2.0-py2.7-linux-x86_64.egg/twisted/protocols/shoutcast.py
deleted file mode 100755
index 317d5e85..00000000
--- a/lib/python2.7/site-packages/Twisted-12.2.0-py2.7-linux-x86_64.egg/twisted/protocols/shoutcast.py
+++ /dev/null
@@ -1,111 +0,0 @@
-# Copyright (c) Twisted Matrix Laboratories.
-# See LICENSE for details.
-
-"""
-Chop up shoutcast stream into MP3s and metadata, if available.
-"""
-
-from twisted.web import http
-from twisted import copyright
-
-
-class ShoutcastClient(http.HTTPClient):
- """
- Shoutcast HTTP stream.
-
- Modes can be 'length', 'meta' and 'mp3'.
-
- See U{http://www.smackfu.com/stuff/programming/shoutcast.html}
- for details on the protocol.
- """
-
- userAgent = "Twisted Shoutcast client " + copyright.version
-
- def __init__(self, path="/"):
- self.path = path
- self.got_metadata = False
- self.metaint = None
- self.metamode = "mp3"
- self.databuffer = ""
-
- def connectionMade(self):
- self.sendCommand("GET", self.path)
- self.sendHeader("User-Agent", self.userAgent)
- self.sendHeader("Icy-MetaData", "1")
- self.endHeaders()
-
- def lineReceived(self, line):
- # fix shoutcast crappiness
- if not self.firstLine and line:
- if len(line.split(": ", 1)) == 1:
- line = line.replace(":", ": ", 1)
- http.HTTPClient.lineReceived(self, line)
-
- def handleHeader(self, key, value):
- if key.lower() == 'icy-metaint':
- self.metaint = int(value)
- self.got_metadata = True
-
- def handleEndHeaders(self):
- # Let's check if we got metadata, and set the
- # appropriate handleResponsePart method.
- if self.got_metadata:
- # if we have metadata, then it has to be parsed out of the data stream
- self.handleResponsePart = self.handleResponsePart_with_metadata
- else:
- # otherwise, all the data is MP3 data
- self.handleResponsePart = self.gotMP3Data
-
- def handleResponsePart_with_metadata(self, data):
- self.databuffer += data
- while self.databuffer:
- stop = getattr(self, "handle_%s" % self.metamode)()
- if stop:
- return
-
- def handle_length(self):
- self.remaining = ord(self.databuffer[0]) * 16
- self.databuffer = self.databuffer[1:]
- self.metamode = "meta"
-
- def handle_mp3(self):
- if len(self.databuffer) > self.metaint:
- self.gotMP3Data(self.databuffer[:self.metaint])
- self.databuffer = self.databuffer[self.metaint:]
- self.metamode = "length"
- else:
- return 1
-
- def handle_meta(self):
- if len(self.databuffer) >= self.remaining:
- if self.remaining:
- data = self.databuffer[:self.remaining]
- self.gotMetaData(self.parseMetadata(data))
- self.databuffer = self.databuffer[self.remaining:]
- self.metamode = "mp3"
- else:
- return 1
-
- def parseMetadata(self, data):
- meta = []
- for chunk in data.split(';'):
- chunk = chunk.strip().replace("\x00", "")
- if not chunk:
- continue
- key, value = chunk.split('=', 1)
- if value.startswith("'") and value.endswith("'"):
- value = value[1:-1]
- meta.append((key, value))
- return meta
-
- def gotMetaData(self, metadata):
- """Called with a list of (key, value) pairs of metadata,
- if metadata is available on the server.
-
- Will only be called on non-empty metadata.
- """
- raise NotImplementedError, "implement in subclass"
-
- def gotMP3Data(self, data):
- """Called with chunk of MP3 data."""
- raise NotImplementedError, "implement in subclass"
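
For context, ShoutcastClient is abstract: gotMP3Data and gotMetaData must be overridden. A minimal subclass sketch follows; the server address, port, and output filename are illustrative placeholders.

from twisted.internet import protocol, reactor
from twisted.protocols import shoutcast

class StreamDumper(shoutcast.ShoutcastClient):
    def connectionMade(self):
        # open the output file, then let the base class send the GET request
        self.outfile = open('stream.mp3', 'wb')
        shoutcast.ShoutcastClient.connectionMade(self)

    def gotMP3Data(self, data):
        self.outfile.write(data)

    def gotMetaData(self, metadata):
        print 'metadata:', metadata

factory = protocol.ClientFactory()
factory.protocol = StreamDumper
reactor.connectTCP('shoutcast.example.com', 8000, factory)
reactor.run()
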
diff --git a/lib/python2.7/site-packages/Twisted-12.2.0-py2.7-linux-x86_64.egg/twisted/protocols/sip.py b/lib/python2.7/site-packages/Twisted-12.2.0-py2.7-linux-x86_64.egg/twisted/protocols/sip.py
deleted file mode 100755
index 8a3f05cb..00000000
--- a/lib/python2.7/site-packages/Twisted-12.2.0-py2.7-linux-x86_64.egg/twisted/protocols/sip.py
+++ /dev/null
@@ -1,1347 +0,0 @@
-# -*- test-case-name: twisted.test.test_sip -*-
-
-# Copyright (c) Twisted Matrix Laboratories.
-# See LICENSE for details.
-
-
-"""Session Initialization Protocol.
-
-Documented in RFC 2543.
-[Superseded by RFC 3261]
-
-
-This module contains a deprecated implementation of HTTP Digest authentication.
-See L{twisted.cred.credentials} and L{twisted.cred._digest} for its new home.
-"""
-
-# system imports
-import socket, time, sys, random, warnings
-from zope.interface import implements, Interface
-
-# twisted imports
-from twisted.python import log, util
-from twisted.python.deprecate import deprecated
-from twisted.python.versions import Version
-from twisted.python.hashlib import md5
-from twisted.internet import protocol, defer, reactor
-
-from twisted import cred
-import twisted.cred.error
-from twisted.cred.credentials import UsernameHashedPassword, UsernamePassword
-
-
-# sibling imports
-from twisted.protocols import basic
-
-PORT = 5060
-
-# SIP headers have short forms
-shortHeaders = {"call-id": "i",
- "contact": "m",
- "content-encoding": "e",
- "content-length": "l",
- "content-type": "c",
- "from": "f",
- "subject": "s",
- "to": "t",
- "via": "v",
- }
-
-longHeaders = {}
-for k, v in shortHeaders.items():
- longHeaders[v] = k
-del k, v
-
-statusCodes = {
- 100: "Trying",
- 180: "Ringing",
- 181: "Call Is Being Forwarded",
- 182: "Queued",
- 183: "Session Progress",
-
- 200: "OK",
-
- 300: "Multiple Choices",
- 301: "Moved Permanently",
- 302: "Moved Temporarily",
- 303: "See Other",
- 305: "Use Proxy",
- 380: "Alternative Service",
-
- 400: "Bad Request",
- 401: "Unauthorized",
- 402: "Payment Required",
- 403: "Forbidden",
- 404: "Not Found",
- 405: "Method Not Allowed",
- 406: "Not Acceptable",
- 407: "Proxy Authentication Required",
- 408: "Request Timeout",
- 409: "Conflict", # Not in RFC3261
- 410: "Gone",
- 411: "Length Required", # Not in RFC3261
- 413: "Request Entity Too Large",
- 414: "Request-URI Too Large",
- 415: "Unsupported Media Type",
- 416: "Unsupported URI Scheme",
- 420: "Bad Extension",
- 421: "Extension Required",
- 423: "Interval Too Brief",
- 480: "Temporarily Unavailable",
- 481: "Call/Transaction Does Not Exist",
- 482: "Loop Detected",
- 483: "Too Many Hops",
- 484: "Address Incomplete",
- 485: "Ambiguous",
- 486: "Busy Here",
- 487: "Request Terminated",
- 488: "Not Acceptable Here",
- 491: "Request Pending",
- 493: "Undecipherable",
-
- 500: "Internal Server Error",
- 501: "Not Implemented",
- 502: "Bad Gateway", # no donut
- 503: "Service Unavailable",
- 504: "Server Time-out",
- 505: "SIP Version not supported",
- 513: "Message Too Large",
-
- 600: "Busy Everywhere",
- 603: "Decline",
- 604: "Does not exist anywhere",
- 606: "Not Acceptable",
-}
-
-specialCases = {
- 'cseq': 'CSeq',
- 'call-id': 'Call-ID',
- 'www-authenticate': 'WWW-Authenticate',
-}
-
-
-def dashCapitalize(s):
- ''' Capitalize a string, making sure to treat - as a word separator '''
- return '-'.join([ x.capitalize() for x in s.split('-')])
-
-def unq(s):
- if s[0] == s[-1] == '"':
- return s[1:-1]
- return s
-
-def DigestCalcHA1(
- pszAlg,
- pszUserName,
- pszRealm,
- pszPassword,
- pszNonce,
- pszCNonce,
-):
- m = md5()
- m.update(pszUserName)
- m.update(":")
- m.update(pszRealm)
- m.update(":")
- m.update(pszPassword)
- HA1 = m.digest()
- if pszAlg == "md5-sess":
- m = md5()
- m.update(HA1)
- m.update(":")
- m.update(pszNonce)
- m.update(":")
- m.update(pszCNonce)
- HA1 = m.digest()
- return HA1.encode('hex')
-
-
-DigestCalcHA1 = deprecated(Version("Twisted", 9, 0, 0))(DigestCalcHA1)
-
-def DigestCalcResponse(
- HA1,
- pszNonce,
- pszNonceCount,
- pszCNonce,
- pszQop,
- pszMethod,
- pszDigestUri,
- pszHEntity,
-):
- m = md5()
- m.update(pszMethod)
- m.update(":")
- m.update(pszDigestUri)
- if pszQop == "auth-int":
- m.update(":")
- m.update(pszHEntity)
- HA2 = m.digest().encode('hex')
-
- m = md5()
- m.update(HA1)
- m.update(":")
- m.update(pszNonce)
- m.update(":")
- if pszNonceCount and pszCNonce: # pszQop:
- m.update(pszNonceCount)
- m.update(":")
- m.update(pszCNonce)
- m.update(":")
- m.update(pszQop)
- m.update(":")
- m.update(HA2)
- hash = m.digest().encode('hex')
- return hash
-
-
-DigestCalcResponse = deprecated(Version("Twisted", 9, 0, 0))(DigestCalcResponse)
-
-_absent = object()
-
-class Via(object):
- """
- A L{Via} is a SIP Via header, representing a segment of the path taken by
- the request.
-
- See RFC 3261, sections 8.1.1.7, 18.2.2, and 20.42.
-
- @ivar transport: Network protocol used for this leg. (Probably either "TCP"
- or "UDP".)
- @type transport: C{str}
- @ivar branch: Unique identifier for this request.
- @type branch: C{str}
- @ivar host: Hostname or IP for this leg.
- @type host: C{str}
- @ivar port: Port used for this leg.
- @type port: C{int}, or None.
- @ivar rportRequested: Whether to request RFC 3581 client processing or not.
- @type rportRequested: C{bool}
- @ivar rportValue: Servers wishing to honor requests for RFC 3581 processing
- should set this parameter to the source port the request was received
- from.
- @type rportValue: C{int}, or None.
-
- @ivar ttl: Time-to-live for requests on multicast paths.
- @type ttl: C{int}, or None.
- @ivar maddr: The destination multicast address, if any.
- @type maddr: C{str}, or None.
- @ivar hidden: Obsolete in SIP 2.0.
- @type hidden: C{bool}
- @ivar otherParams: Any other parameters in the header.
- @type otherParams: C{dict}
- """
-
- def __init__(self, host, port=PORT, transport="UDP", ttl=None,
- hidden=False, received=None, rport=_absent, branch=None,
- maddr=None, **kw):
- """
- Set parameters of this Via header. All arguments correspond to
- attributes of the same name.
-
- To maintain compatibility with old SIP
- code, the 'rport' argument is used to determine the values of
- C{rportRequested} and C{rportValue}. If None, C{rportRequested} is set
- to True. (The deprecated method for doing this is to pass True.) If an
- integer, C{rportValue} is set to the given value.
-
- Any arguments not explicitly named here are collected into the
- C{otherParams} dict.
- """
- self.transport = transport
- self.host = host
- self.port = port
- self.ttl = ttl
- self.hidden = hidden
- self.received = received
- if rport is True:
- warnings.warn(
- "rport=True is deprecated since Twisted 9.0.",
- DeprecationWarning,
- stacklevel=2)
- self.rportValue = None
- self.rportRequested = True
- elif rport is None:
- self.rportValue = None
- self.rportRequested = True
- elif rport is _absent:
- self.rportValue = None
- self.rportRequested = False
- else:
- self.rportValue = rport
- self.rportRequested = False
-
- self.branch = branch
- self.maddr = maddr
- self.otherParams = kw
-
-
- def _getrport(self):
- """
- Returns the rport value expected by the old SIP code.
- """
- if self.rportRequested == True:
- return True
- elif self.rportValue is not None:
- return self.rportValue
- else:
- return None
-
-
- def _setrport(self, newRPort):
- """
- L{Base._fixupNAT} sets C{rport} directly, so this method sets
- C{rportValue} based on that.
-
- @param newRPort: The new rport value.
- @type newRPort: C{int}
- """
- self.rportValue = newRPort
- self.rportRequested = False
-
-
- rport = property(_getrport, _setrport)
-
- def toString(self):
- """
- Serialize this header for use in a request or response.
- """
- s = "SIP/2.0/%s %s:%s" % (self.transport, self.host, self.port)
- if self.hidden:
- s += ";hidden"
- for n in "ttl", "branch", "maddr", "received":
- value = getattr(self, n)
- if value is not None:
- s += ";%s=%s" % (n, value)
- if self.rportRequested:
- s += ";rport"
- elif self.rportValue is not None:
- s += ";rport=%s" % (self.rport,)
-
- etc = self.otherParams.items()
- etc.sort()
- for k, v in etc:
- if v is None:
- s += ";" + k
- else:
- s += ";%s=%s" % (k, v)
- return s
-
-
-def parseViaHeader(value):
- """
- Parse a Via header.
-
- @return: The parsed version of this header.
- @rtype: L{Via}
- """
- parts = value.split(";")
- sent, params = parts[0], parts[1:]
- protocolinfo, by = sent.split(" ", 1)
- by = by.strip()
- result = {}
- pname, pversion, transport = protocolinfo.split("/")
- if pname != "SIP" or pversion != "2.0":
- raise ValueError, "wrong protocol or version: %r" % value
- result["transport"] = transport
- if ":" in by:
- host, port = by.split(":")
- result["port"] = int(port)
- result["host"] = host
- else:
- result["host"] = by
- for p in params:
- # it's the comment-stripping dance!
- p = p.strip().split(" ", 1)
- if len(p) == 1:
- p, comment = p[0], ""
- else:
- p, comment = p
- if p == "hidden":
- result["hidden"] = True
- continue
- parts = p.split("=", 1)
- if len(parts) == 1:
- name, value = parts[0], None
- else:
- name, value = parts
- if name in ("rport", "ttl"):
- value = int(value)
- result[name] = value
- return Via(**result)
-
-
-class URL:
- """A SIP URL."""
-
- def __init__(self, host, username=None, password=None, port=None,
- transport=None, usertype=None, method=None,
- ttl=None, maddr=None, tag=None, other=None, headers=None):
- self.username = username
- self.host = host
- self.password = password
- self.port = port
- self.transport = transport
- self.usertype = usertype
- self.method = method
- self.tag = tag
- self.ttl = ttl
- self.maddr = maddr
- if other == None:
- self.other = []
- else:
- self.other = other
- if headers == None:
- self.headers = {}
- else:
- self.headers = headers
-
- def toString(self):
- l = []; w = l.append
- w("sip:")
- if self.username != None:
- w(self.username)
- if self.password != None:
- w(":%s" % self.password)
- w("@")
- w(self.host)
- if self.port != None:
- w(":%d" % self.port)
- if self.usertype != None:
- w(";user=%s" % self.usertype)
- for n in ("transport", "ttl", "maddr", "method", "tag"):
- v = getattr(self, n)
- if v != None:
- w(";%s=%s" % (n, v))
- for v in self.other:
- w(";%s" % v)
- if self.headers:
- w("?")
- w("&".join([("%s=%s" % (specialCases.get(h) or dashCapitalize(h), v)) for (h, v) in self.headers.items()]))
- return "".join(l)
-
- def __str__(self):
- return self.toString()
-
- def __repr__(self):
- return '<URL %s:%s@%s:%r/%s>' % (self.username, self.password, self.host, self.port, self.transport)
-
-
-def parseURL(url, host=None, port=None):
- """Parse a string into a URL object.
-
- URIs are of the form 'sip:user@example.com'.
- """
- d = {}
- if not url.startswith("sip:"):
- raise ValueError("unsupported scheme: " + url[:4])
- parts = url[4:].split(";")
- userdomain, params = parts[0], parts[1:]
- udparts = userdomain.split("@", 1)
- if len(udparts) == 2:
- userpass, hostport = udparts
- upparts = userpass.split(":", 1)
- if len(upparts) == 1:
- d["username"] = upparts[0]
- else:
- d["username"] = upparts[0]
- d["password"] = upparts[1]
- else:
- hostport = udparts[0]
- hpparts = hostport.split(":", 1)
- if len(hpparts) == 1:
- d["host"] = hpparts[0]
- else:
- d["host"] = hpparts[0]
- d["port"] = int(hpparts[1])
- if host != None:
- d["host"] = host
- if port != None:
- d["port"] = port
- for p in params:
- if p == params[-1] and "?" in p:
- d["headers"] = h = {}
- p, headers = p.split("?", 1)
- for header in headers.split("&"):
- k, v = header.split("=")
- h[k] = v
- nv = p.split("=", 1)
- if len(nv) == 1:
- d.setdefault("other", []).append(p)
- continue
- name, value = nv
- if name == "user":
- d["usertype"] = value
- elif name in ("transport", "ttl", "maddr", "method", "tag"):
- if name == "ttl":
- value = int(value)
- d[name] = value
- else:
- d.setdefault("other", []).append(p)
- return URL(**d)
-
-
-def cleanRequestURL(url):
- """Clean a URL from a Request line."""
- url.transport = None
- url.maddr = None
- url.ttl = None
- url.headers = {}
-
-
-def parseAddress(address, host=None, port=None, clean=0):
- """Return (name, uri, params) for From/To/Contact header.
-
- @param clean: remove unnecessary info, usually for From and To headers.
- """
- address = address.strip()
- # simple 'sip:foo' case
- if address.startswith("sip:"):
- return "", parseURL(address, host=host, port=port), {}
- params = {}
- name, url = address.split("<", 1)
- name = name.strip()
- if name.startswith('"'):
- name = name[1:]
- if name.endswith('"'):
- name = name[:-1]
- url, paramstring = url.split(">", 1)
- url = parseURL(url, host=host, port=port)
- paramstring = paramstring.strip()
- if paramstring:
- for l in paramstring.split(";"):
- if not l:
- continue
- k, v = l.split("=")
- params[k] = v
- if clean:
- # rfc 2543 6.21
- url.ttl = None
- url.headers = {}
- url.transport = None
- url.maddr = None
- return name, url, params
-
-
-class SIPError(Exception):
- def __init__(self, code, phrase=None):
- if phrase is None:
- phrase = statusCodes[code]
- Exception.__init__(self, "SIP error (%d): %s" % (code, phrase))
- self.code = code
- self.phrase = phrase
-
-
-class RegistrationError(SIPError):
- """Registration was not possible."""
-
-
-class Message:
- """A SIP message."""
-
- length = None
-
- def __init__(self):
- self.headers = util.OrderedDict() # map name to list of values
- self.body = ""
- self.finished = 0
-
- def addHeader(self, name, value):
- name = name.lower()
- name = longHeaders.get(name, name)
- if name == "content-length":
- self.length = int(value)
- self.headers.setdefault(name,[]).append(value)
-
- def bodyDataReceived(self, data):
- self.body += data
-
- def creationFinished(self):
- if (self.length != None) and (self.length != len(self.body)):
- raise ValueError, "wrong body length"
- self.finished = 1
-
- def toString(self):
- s = "%s\r\n" % self._getHeaderLine()
- for n, vs in self.headers.items():
- for v in vs:
- s += "%s: %s\r\n" % (specialCases.get(n) or dashCapitalize(n), v)
- s += "\r\n"
- s += self.body
- return s
-
- def _getHeaderLine(self):
- raise NotImplementedError
-
-
-class Request(Message):
- """A Request for a URI"""
-
-
- def __init__(self, method, uri, version="SIP/2.0"):
- Message.__init__(self)
- self.method = method
- if isinstance(uri, URL):
- self.uri = uri
- else:
- self.uri = parseURL(uri)
- cleanRequestURL(self.uri)
-
- def __repr__(self):
- return "<SIP Request %d:%s %s>" % (id(self), self.method, self.uri.toString())
-
- def _getHeaderLine(self):
- return "%s %s SIP/2.0" % (self.method, self.uri.toString())
-
-
-class Response(Message):
- """A Response to a URI Request"""
-
- def __init__(self, code, phrase=None, version="SIP/2.0"):
- Message.__init__(self)
- self.code = code
- if phrase == None:
- phrase = statusCodes[code]
- self.phrase = phrase
-
- def __repr__(self):
- return "<SIP Response %d:%s>" % (id(self), self.code)
-
- def _getHeaderLine(self):
- return "SIP/2.0 %s %s" % (self.code, self.phrase)
-
-
-class MessagesParser(basic.LineReceiver):
- """A SIP messages parser.
-
- Expects dataReceived, dataDone repeatedly,
- in that order. Shouldn't be connected to an actual transport.
- """
-
- version = "SIP/2.0"
- acceptResponses = 1
- acceptRequests = 1
- state = "firstline" # or "headers", "body" or "invalid"
-
- debug = 0
-
- def __init__(self, messageReceivedCallback):
- self.messageReceived = messageReceivedCallback
- self.reset()
-
- def reset(self, remainingData=""):
- self.state = "firstline"
- self.length = None # body length
- self.bodyReceived = 0 # how much of the body we received
- self.message = None
- self.header = None
- self.setLineMode(remainingData)
-
- def invalidMessage(self):
- self.state = "invalid"
- self.setRawMode()
-
- def dataDone(self):
- # clear out any buffered data that may be hanging around
- self.clearLineBuffer()
- if self.state == "firstline":
- return
- if self.state != "body":
- self.reset()
- return
- if self.length == None:
- # no content-length header, so end of data signals message done
- self.messageDone()
- elif self.length < self.bodyReceived:
- # aborted in the middle
- self.reset()
- else:
- # we have enough data and message wasn't finished? something is wrong
- raise RuntimeError, "this should never happen"
-
- def dataReceived(self, data):
- try:
- basic.LineReceiver.dataReceived(self, data)
- except:
- log.err()
- self.invalidMessage()
-
- def handleFirstLine(self, line):
- """Expected to create self.message."""
- raise NotImplementedError
-
- def lineLengthExceeded(self, line):
- self.invalidMessage()
-
- def lineReceived(self, line):
- if self.state == "firstline":
- while line.startswith("\n") or line.startswith("\r"):
- line = line[1:]
- if not line:
- return
- try:
- a, b, c = line.split(" ", 2)
- except ValueError:
- self.invalidMessage()
- return
- if a == "SIP/2.0" and self.acceptResponses:
- # response
- try:
- code = int(b)
- except ValueError:
- self.invalidMessage()
- return
- self.message = Response(code, c)
- elif c == "SIP/2.0" and self.acceptRequests:
- self.message = Request(a, b)
- else:
- self.invalidMessage()
- return
- self.state = "headers"
- return
- else:
- assert self.state == "headers"
- if line:
- # multiline header
- if line.startswith(" ") or line.startswith("\t"):
- name, value = self.header
- self.header = name, (value + line.lstrip())
- else:
- # new header
- if self.header:
- self.message.addHeader(*self.header)
- self.header = None
- try:
- name, value = line.split(":", 1)
- except ValueError:
- self.invalidMessage()
- return
- self.header = name, value.lstrip()
- # XXX we assume content-length won't be multiline
- if name.lower() == "content-length":
- try:
- self.length = int(value.lstrip())
- except ValueError:
- self.invalidMessage()
- return
- else:
- # CRLF, we now have message body until self.length bytes,
- # or if no length was given, until there is no more data
- # from the connection sending us data.
- self.state = "body"
- if self.header:
- self.message.addHeader(*self.header)
- self.header = None
- if self.length == 0:
- self.messageDone()
- return
- self.setRawMode()
-
- def messageDone(self, remainingData=""):
- assert self.state == "body"
- self.message.creationFinished()
- self.messageReceived(self.message)
- self.reset(remainingData)
-
- def rawDataReceived(self, data):
- assert self.state in ("body", "invalid")
- if self.state == "invalid":
- return
- if self.length == None:
- self.message.bodyDataReceived(data)
- else:
- dataLen = len(data)
- expectedLen = self.length - self.bodyReceived
- if dataLen > expectedLen:
- self.message.bodyDataReceived(data[:expectedLen])
- self.messageDone(data[expectedLen:])
- return
- else:
- self.bodyReceived += dataLen
- self.message.bodyDataReceived(data)
- if self.bodyReceived == self.length:
- self.messageDone()
-
-
-class Base(protocol.DatagramProtocol):
- """Base class for SIP clients and servers."""
-
- PORT = PORT
- debug = False
-
- def __init__(self):
- self.messages = []
- self.parser = MessagesParser(self.addMessage)
-
- def addMessage(self, msg):
- self.messages.append(msg)
-
- def datagramReceived(self, data, addr):
- self.parser.dataReceived(data)
- self.parser.dataDone()
- for m in self.messages:
- self._fixupNAT(m, addr)
- if self.debug:
- log.msg("Received %r from %r" % (m.toString(), addr))
- if isinstance(m, Request):
- self.handle_request(m, addr)
- else:
- self.handle_response(m, addr)
- self.messages[:] = []
-
- def _fixupNAT(self, message, (srcHost, srcPort)):
- # RFC 2543 6.40.2,
- senderVia = parseViaHeader(message.headers["via"][0])
- if senderVia.host != srcHost:
- senderVia.received = srcHost
- if senderVia.port != srcPort:
- senderVia.rport = srcPort
- message.headers["via"][0] = senderVia.toString()
- elif senderVia.rport == True:
- senderVia.received = srcHost
- senderVia.rport = srcPort
- message.headers["via"][0] = senderVia.toString()
-
- def deliverResponse(self, responseMessage):
- """Deliver response.
-
- Destination is based on topmost Via header."""
- destVia = parseViaHeader(responseMessage.headers["via"][0])
- # XXX we don't do multicast yet
- host = destVia.received or destVia.host
- port = destVia.rport or destVia.port or self.PORT
- destAddr = URL(host=host, port=port)
- self.sendMessage(destAddr, responseMessage)
-
- def responseFromRequest(self, code, request):
- """Create a response to a request message."""
- response = Response(code)
- for name in ("via", "to", "from", "call-id", "cseq"):
- response.headers[name] = request.headers.get(name, [])[:]
-
- return response
-
- def sendMessage(self, destURL, message):
- """Send a message.
-
- @param destURL: C{URL}. This should be a *physical* URL, not a logical one.
- @param message: The message to send.
- """
- if destURL.transport not in ("udp", None):
- raise RuntimeError, "only UDP currently supported"
- if self.debug:
- log.msg("Sending %r to %r" % (message.toString(), destURL))
- self.transport.write(message.toString(), (destURL.host, destURL.port or self.PORT))
-
- def handle_request(self, message, addr):
- """Override to define behavior for requests received
-
- @type message: C{Message}
- @type addr: C{tuple}
- """
- raise NotImplementedError
-
- def handle_response(self, message, addr):
- """Override to define behavior for responses received.
-
- @type message: C{Message}
- @type addr: C{tuple}
- """
- raise NotImplementedError
-
-
-class IContact(Interface):
- """A user of a registrar or proxy"""
-
-
-class Registration:
- def __init__(self, secondsToExpiry, contactURL):
- self.secondsToExpiry = secondsToExpiry
- self.contactURL = contactURL
-
-class IRegistry(Interface):
- """Allows registration of logical->physical URL mapping."""
-
- def registerAddress(domainURL, logicalURL, physicalURL):
- """Register the physical address of a logical URL.
-
- @return: Deferred of C{Registration} or failure with RegistrationError.
- """
-
- def unregisterAddress(domainURL, logicalURL, physicalURL):
- """Unregister the physical address of a logical URL.
-
- @return: Deferred of C{Registration} or failure with RegistrationError.
- """
-
- def getRegistrationInfo(logicalURL):
- """Get registration info for logical URL.
-
- @return: Deferred of C{Registration} object or failure of LookupError.
- """
-
-
-class ILocator(Interface):
- """Allow looking up physical address for logical URL."""
-
- def getAddress(logicalURL):
- """Return physical URL of server for logical URL of user.
-
- @param logicalURL: a logical C{URL}.
- @return: Deferred which becomes URL or fails with LookupError.
- """
-
-
-class Proxy(Base):
- """SIP proxy."""
-
- PORT = PORT
-
- locator = None # object implementing ILocator
-
- def __init__(self, host=None, port=PORT):
- """Create new instance.
-
- @param host: our hostname/IP as set in Via headers.
- @param port: our port as set in Via headers.
- """
- self.host = host or socket.getfqdn()
- self.port = port
- Base.__init__(self)
-
- def getVia(self):
- """Return value of Via header for this proxy."""
- return Via(host=self.host, port=self.port)
-
- def handle_request(self, message, addr):
- # send immediate 100/trying message before processing
- #self.deliverResponse(self.responseFromRequest(100, message))
- f = getattr(self, "handle_%s_request" % message.method, None)
- if f is None:
- f = self.handle_request_default
- try:
- d = f(message, addr)
- except SIPError, e:
- self.deliverResponse(self.responseFromRequest(e.code, message))
- except:
- log.err()
- self.deliverResponse(self.responseFromRequest(500, message))
- else:
- if d is not None:
- d.addErrback(lambda e:
- self.deliverResponse(self.responseFromRequest(e.code, message))
- )
-
- def handle_request_default(self, message, (srcHost, srcPort)):
- """Default request handler.
-
- Default behaviour for OPTIONS and unknown methods for proxies
- is to forward message on to the client.
-
- Since at the moment we are a stateless proxy, that's basically
- everything.
- """
- def _mungContactHeader(uri, message):
- message.headers['contact'][0] = uri.toString()
- return self.sendMessage(uri, message)
-
- viaHeader = self.getVia()
- if viaHeader.toString() in message.headers["via"]:
- # must be a loop, so drop message
- log.msg("Dropping looped message.")
- return
-
- message.headers["via"].insert(0, viaHeader.toString())
- name, uri, tags = parseAddress(message.headers["to"][0], clean=1)
-
- # this is broken and needs refactoring to use cred
- d = self.locator.getAddress(uri)
- d.addCallback(self.sendMessage, message)
- d.addErrback(self._cantForwardRequest, message)
-
- def _cantForwardRequest(self, error, message):
- error.trap(LookupError)
- del message.headers["via"][0] # this'll be us
- self.deliverResponse(self.responseFromRequest(404, message))
-
- def deliverResponse(self, responseMessage):
- """Deliver response.
-
- Destination is based on topmost Via header."""
- destVia = parseViaHeader(responseMessage.headers["via"][0])
- # XXX we don't do multicast yet
- host = destVia.received or destVia.host
- port = destVia.rport or destVia.port or self.PORT
-
- destAddr = URL(host=host, port=port)
- self.sendMessage(destAddr, responseMessage)
-
- def responseFromRequest(self, code, request):
- """Create a response to a request message."""
- response = Response(code)
- for name in ("via", "to", "from", "call-id", "cseq"):
- response.headers[name] = request.headers.get(name, [])[:]
- return response
-
- def handle_response(self, message, addr):
- """Default response handler."""
- v = parseViaHeader(message.headers["via"][0])
- if (v.host, v.port) != (self.host, self.port):
- # we got a message not intended for us?
- # XXX note this check breaks if we have multiple external IPs
- # yay for suck protocols
- log.msg("Dropping incorrectly addressed message")
- return
- del message.headers["via"][0]
- if not message.headers["via"]:
- # this message is addressed to us
- self.gotResponse(message, addr)
- return
- self.deliverResponse(message)
-
- def gotResponse(self, message, addr):
- """Called with responses that are addressed at this server."""
- pass
-
-class IAuthorizer(Interface):
- def getChallenge(peer):
- """Generate a challenge the client may respond to.
-
- @type peer: C{tuple}
- @param peer: The client's address
-
- @rtype: C{str}
- @return: The challenge string
- """
-
- def decode(response):
- """Create a credentials object from the given response.
-
- @type response: C{str}
- """
-
-class BasicAuthorizer:
- """Authorizer for insecure Basic (base64-encoded plaintext) authentication.
-
- This form of authentication is broken and insecure. Do not use it.
- """
-
- implements(IAuthorizer)
-
- def __init__(self):
- """
- This method exists solely to issue a deprecation warning.
- """
- warnings.warn(
- "twisted.protocols.sip.BasicAuthorizer was deprecated "
- "in Twisted 9.0.0",
- category=DeprecationWarning,
- stacklevel=2)
-
-
- def getChallenge(self, peer):
- return None
-
- def decode(self, response):
- # At least one SIP client improperly pads its Base64 encoded messages
- for i in range(3):
- try:
- creds = (response + ('=' * i)).decode('base64')
- except:
- pass
- else:
- break
- else:
- # Totally bogus
- raise SIPError(400)
- p = creds.split(':', 1)
- if len(p) == 2:
- return UsernamePassword(*p)
- raise SIPError(400)
-
-
-
-class DigestedCredentials(UsernameHashedPassword):
- """Yet Another Simple Digest-MD5 authentication scheme"""
-
- def __init__(self, username, fields, challenges):
- warnings.warn(
- "twisted.protocols.sip.DigestedCredentials was deprecated "
- "in Twisted 9.0.0",
- category=DeprecationWarning,
- stacklevel=2)
- self.username = username
- self.fields = fields
- self.challenges = challenges
-
- def checkPassword(self, password):
- method = 'REGISTER'
- response = self.fields.get('response')
- uri = self.fields.get('uri')
- nonce = self.fields.get('nonce')
- cnonce = self.fields.get('cnonce')
- nc = self.fields.get('nc')
- algo = self.fields.get('algorithm', 'MD5')
- qop = self.fields.get('qop-options', 'auth')
- opaque = self.fields.get('opaque')
-
- if opaque not in self.challenges:
- return False
- del self.challenges[opaque]
-
- user, domain = self.username.split('@', 1)
- if uri is None:
- uri = 'sip:' + domain
-
- expected = DigestCalcResponse(
- DigestCalcHA1(algo, user, domain, password, nonce, cnonce),
- nonce, nc, cnonce, qop, method, uri, None,
- )
-
- return expected == response
-
-class DigestAuthorizer:
- CHALLENGE_LIFETIME = 15
-
- implements(IAuthorizer)
-
- def __init__(self):
- warnings.warn(
- "twisted.protocols.sip.DigestAuthorizer was deprecated "
- "in Twisted 9.0.0",
- category=DeprecationWarning,
- stacklevel=2)
-
- self.outstanding = {}
-
-
-
- def generateNonce(self):
- c = tuple([random.randrange(sys.maxint) for _ in range(3)])
- c = '%d%d%d' % c
- return c
-
- def generateOpaque(self):
- return str(random.randrange(sys.maxint))
-
- def getChallenge(self, peer):
- c = self.generateNonce()
- o = self.generateOpaque()
- self.outstanding[o] = c
- return ','.join((
- 'nonce="%s"' % c,
- 'opaque="%s"' % o,
- 'qop-options="auth"',
- 'algorithm="MD5"',
- ))
-
- def decode(self, response):
- response = ' '.join(response.splitlines())
- parts = response.split(',')
- auth = dict([(k.strip(), unq(v.strip())) for (k, v) in [p.split('=', 1) for p in parts]])
- try:
- username = auth['username']
- except KeyError:
- raise SIPError(401)
- try:
- return DigestedCredentials(username, auth, self.outstanding)
- except:
- raise SIPError(400)
-
-
-class RegisterProxy(Proxy):
- """A proxy that allows registration for a specific domain.
-
- Unregistered users won't be handled.
- """
-
- portal = None
-
- registry = None # should implement IRegistry
-
- authorizers = {}
-
- def __init__(self, *args, **kw):
- Proxy.__init__(self, *args, **kw)
- self.liveChallenges = {}
- if "digest" not in self.authorizers:
- self.authorizers["digest"] = DigestAuthorizer()
-
- def handle_ACK_request(self, message, (host, port)):
- # XXX
- # ACKs are a client's way of indicating they got the last message
- # Responding to them is not a good idea.
- # However, we should keep track of terminal messages and re-transmit
- # if no ACK is received.
- pass
-
- def handle_REGISTER_request(self, message, (host, port)):
- """Handle a registration request.
-
- Currently registration is not proxied.
- """
- if self.portal is None:
- # There is no portal. Let anyone in.
- self.register(message, host, port)
- else:
- # There is a portal. Check for credentials.
- if not message.headers.has_key("authorization"):
- return self.unauthorized(message, host, port)
- else:
- return self.login(message, host, port)
-
- def unauthorized(self, message, host, port):
- m = self.responseFromRequest(401, message)
- for (scheme, auth) in self.authorizers.iteritems():
- chal = auth.getChallenge((host, port))
- if chal is None:
- value = '%s realm="%s"' % (scheme.title(), self.host)
- else:
- value = '%s %s,realm="%s"' % (scheme.title(), chal, self.host)
- m.headers.setdefault('www-authenticate', []).append(value)
- self.deliverResponse(m)
-
-
- def login(self, message, host, port):
- parts = message.headers['authorization'][0].split(None, 1)
- a = self.authorizers.get(parts[0].lower())
- if a:
- try:
- c = a.decode(parts[1])
- except SIPError:
- raise
- except:
- log.err()
- self.deliverResponse(self.responseFromRequest(500, message))
- else:
- c.username += '@' + self.host
- self.portal.login(c, None, IContact
- ).addCallback(self._cbLogin, message, host, port
- ).addErrback(self._ebLogin, message, host, port
- ).addErrback(log.err
- )
- else:
- self.deliverResponse(self.responseFromRequest(501, message))
-
- def _cbLogin(self, (i, a, l), message, host, port):
- # It's stateless, matey. What a joke.
- self.register(message, host, port)
-
- def _ebLogin(self, failure, message, host, port):
- failure.trap(cred.error.UnauthorizedLogin)
- self.unauthorized(message, host, port)
-
- def register(self, message, host, port):
- """Allow all users to register"""
- name, toURL, params = parseAddress(message.headers["to"][0], clean=1)
- contact = None
- if message.headers.has_key("contact"):
- contact = message.headers["contact"][0]
-
- if message.headers.get("expires", [None])[0] == "0":
- self.unregister(message, toURL, contact)
- else:
- # XXX Check expires on appropriate URL, and pass it to registry
- # instead of having registry hardcode it.
- if contact is not None:
- name, contactURL, params = parseAddress(contact, host=host, port=port)
- d = self.registry.registerAddress(message.uri, toURL, contactURL)
- else:
- d = self.registry.getRegistrationInfo(toURL)
- d.addCallbacks(self._cbRegister, self._ebRegister,
- callbackArgs=(message,),
- errbackArgs=(message,)
- )
-
- def _cbRegister(self, registration, message):
- response = self.responseFromRequest(200, message)
- if registration.contactURL != None:
- response.addHeader("contact", registration.contactURL.toString())
- response.addHeader("expires", "%d" % registration.secondsToExpiry)
- response.addHeader("content-length", "0")
- self.deliverResponse(response)
-
- def _ebRegister(self, error, message):
- error.trap(RegistrationError, LookupError)
- # XXX return error message, and alter tests to deal with
- # this, currently tests assume no message sent on failure
-
- def unregister(self, message, toURL, contact):
- try:
- expires = int(message.headers["expires"][0])
- except ValueError:
- self.deliverResponse(self.responseFromRequest(400, message))
- else:
- if expires == 0:
- if contact == "*":
- contactURL = "*"
- else:
- name, contactURL, params = parseAddress(contact)
- d = self.registry.unregisterAddress(message.uri, toURL, contactURL)
- d.addCallback(self._cbUnregister, message
- ).addErrback(self._ebUnregister, message
- )
-
- def _cbUnregister(self, registration, message):
- msg = self.responseFromRequest(200, message)
- msg.headers.setdefault('contact', []).append(registration.contactURL.toString())
- msg.addHeader("expires", "0")
- self.deliverResponse(msg)
-
- def _ebUnregister(self, registration, message):
- pass
-
-
-class InMemoryRegistry:
- """A simplistic registry for a specific domain."""
-
- implements(IRegistry, ILocator)
-
- def __init__(self, domain):
- self.domain = domain # the domain we handle registration for
- self.users = {} # map username to (IDelayedCall for expiry, address URI)
-
- def getAddress(self, userURI):
- if userURI.host != self.domain:
- return defer.fail(LookupError("unknown domain"))
- if userURI.username in self.users:
- dc, url = self.users[userURI.username]
- return defer.succeed(url)
- else:
- return defer.fail(LookupError("no such user"))
-
- def getRegistrationInfo(self, userURI):
- if userURI.host != self.domain:
- return defer.fail(LookupError("unknown domain"))
- if self.users.has_key(userURI.username):
- dc, url = self.users[userURI.username]
- return defer.succeed(Registration(int(dc.getTime() - time.time()), url))
- else:
- return defer.fail(LookupError("no such user"))
-
- def _expireRegistration(self, username):
- try:
- dc, url = self.users[username]
- except KeyError:
- return defer.fail(LookupError("no such user"))
- else:
- dc.cancel()
- del self.users[username]
- return defer.succeed(Registration(0, url))
-
- def registerAddress(self, domainURL, logicalURL, physicalURL):
- if domainURL.host != self.domain:
- log.msg("Registration for domain we don't handle.")
- return defer.fail(RegistrationError(404))
- if logicalURL.host != self.domain:
- log.msg("Registration for domain we don't handle.")
- return defer.fail(RegistrationError(404))
- if logicalURL.username in self.users:
- dc, old = self.users[logicalURL.username]
- dc.reset(3600)
- else:
- dc = reactor.callLater(3600, self._expireRegistration, logicalURL.username)
- log.msg("Registered %s at %s" % (logicalURL.toString(), physicalURL.toString()))
- self.users[logicalURL.username] = (dc, physicalURL)
- return defer.succeed(Registration(int(dc.getTime() - time.time()), physicalURL))
-
- def unregisterAddress(self, domainURL, logicalURL, physicalURL):
- return self._expireRegistration(logicalURL.username)
diff --git a/lib/python2.7/site-packages/Twisted-12.2.0-py2.7-linux-x86_64.egg/twisted/protocols/socks.py b/lib/python2.7/site-packages/Twisted-12.2.0-py2.7-linux-x86_64.egg/twisted/protocols/socks.py
deleted file mode 100755
index 445b9f35..00000000
--- a/lib/python2.7/site-packages/Twisted-12.2.0-py2.7-linux-x86_64.egg/twisted/protocols/socks.py
+++ /dev/null
@@ -1,240 +0,0 @@
-# -*- test-case-name: twisted.test.test_socks -*-
-# Copyright (c) Twisted Matrix Laboratories.
-# See LICENSE for details.
-
-"""
-Implementation of the SOCKSv4 protocol.
-"""
-
-# python imports
-import struct
-import string
-import socket
-import time
-
-# twisted imports
-from twisted.internet import reactor, protocol, defer
-from twisted.python import log
-
-
-class SOCKSv4Outgoing(protocol.Protocol):
-
- def __init__(self,socks):
- self.socks=socks
-
- def connectionMade(self):
- peer = self.transport.getPeer()
- self.socks.makeReply(90, 0, port=peer.port, ip=peer.host)
- self.socks.otherConn=self
-
- def connectionLost(self, reason):
- self.socks.transport.loseConnection()
-
- def dataReceived(self,data):
- self.socks.write(data)
-
- def write(self,data):
- self.socks.log(self,data)
- self.transport.write(data)
-
-
-
-class SOCKSv4Incoming(protocol.Protocol):
-
- def __init__(self,socks):
- self.socks=socks
- self.socks.otherConn=self
-
- def connectionLost(self, reason):
- self.socks.transport.loseConnection()
-
- def dataReceived(self,data):
- self.socks.write(data)
-
- def write(self,data):
- self.socks.log(self,data)
- self.transport.write(data)
-
-
-class SOCKSv4(protocol.Protocol):
- """
- An implementation of the SOCKSv4 protocol.
-
- @type logging: C{str} or C{None}
- @ivar logging: If not C{None}, the name of the logfile to which connection
- information will be written.
-
- @type reactor: object providing L{twisted.internet.interfaces.IReactorTCP}
- @ivar reactor: The reactor used to create connections.
-
- @type buf: C{str}
- @ivar buf: Part of a SOCKSv4 connection request.
-
- @type otherConn: C{SOCKSv4Incoming}, C{SOCKSv4Outgoing} or C{None}
- @ivar otherConn: Until the connection has been established, C{otherConn} is
- C{None}. After that, it is the proxy-to-destination protocol instance
- along which the client's connection is being forwarded.
- """
- def __init__(self, logging=None, reactor=reactor):
- self.logging = logging
- self.reactor = reactor
-
- def connectionMade(self):
- self.buf = ""
- self.otherConn = None
-
- def dataReceived(self, data):
- """
- Called whenever data is received.
-
- @type data: C{str}
- @param data: Part or all of a SOCKSv4 packet.
- """
- if self.otherConn:
- self.otherConn.write(data)
- return
- self.buf = self.buf + data
- completeBuffer = self.buf
- if "\000" in self.buf[8:]:
- head, self.buf = self.buf[:8], self.buf[8:]
- version, code, port = struct.unpack("!BBH", head[:4])
- user, self.buf = self.buf.split("\000", 1)
- if head[4:7] == "\000\000\000" and head[7] != "\000":
- # An IP address of the form 0.0.0.X, where X is non-zero,
- # signifies that this is a SOCKSv4a packet.
- # If the complete packet hasn't been received, restore the
- # buffer and wait for it.
- if "\000" not in self.buf:
- self.buf = completeBuffer
- return
- server, self.buf = self.buf.split("\000", 1)
- d = self.reactor.resolve(server)
- d.addCallback(self._dataReceived2, user,
- version, code, port)
- d.addErrback(lambda result, self = self: self.makeReply(91))
- return
- else:
- server = socket.inet_ntoa(head[4:8])
-
- self._dataReceived2(server, user, version, code, port)
-
- def _dataReceived2(self, server, user, version, code, port):
- """
- The second half of the SOCKS connection setup. For a SOCKSv4 packet this
- is after the server address has been extracted from the header. For a
- SOCKSv4a packet this is after the host name has been resolved.
-
- @type server: C{str}
- @param server: The IP address of the destination, represented as a
- dotted quad.
-
- @type user: C{str}
- @param user: The username associated with the connection.
-
- @type version: C{int}
- @param version: The SOCKS protocol version number.
-
- @type code: C{int}
- @param code: The command code. 1 means establish a TCP/IP stream
- connection, and 2 means establish a TCP/IP port binding.
-
- @type port: C{int}
- @param port: The port number associated with the connection.
- """
- assert version == 4, "Bad version code: %s" % version
- if not self.authorize(code, server, port, user):
- self.makeReply(91)
- return
- if code == 1: # CONNECT
- d = self.connectClass(server, port, SOCKSv4Outgoing, self)
- d.addErrback(lambda result, self = self: self.makeReply(91))
- elif code == 2: # BIND
- d = self.listenClass(0, SOCKSv4IncomingFactory, self, server)
- d.addCallback(lambda (h, p),
- self = self: self.makeReply(90, 0, p, h))
- else:
- raise RuntimeError, "Bad Connect Code: %s" % code
- assert self.buf == "", "hmm, still stuff in buffer... %s" % repr(
- self.buf)
-
- def connectionLost(self, reason):
- if self.otherConn:
- self.otherConn.transport.loseConnection()
-
- def authorize(self,code,server,port,user):
- log.msg("code %s connection to %s:%s (user %s) authorized" % (code,server,port,user))
- return 1
-
- def connectClass(self, host, port, klass, *args):
- return protocol.ClientCreator(reactor, klass, *args).connectTCP(host,port)
-
- def listenClass(self, port, klass, *args):
- serv = reactor.listenTCP(port, klass(*args))
- return defer.succeed(serv.getHost()[1:])
-
- def makeReply(self,reply,version=0,port=0,ip="0.0.0.0"):
- self.transport.write(struct.pack("!BBH",version,reply,port)+socket.inet_aton(ip))
- if reply!=90: self.transport.loseConnection()
-
- def write(self,data):
- self.log(self,data)
- self.transport.write(data)
-
- def log(self,proto,data):
- if not self.logging: return
- peer = self.transport.getPeer()
- their_peer = self.otherConn.transport.getPeer()
- f=open(self.logging,"a")
- f.write("%s\t%s:%d %s %s:%d\n"%(time.ctime(),
- peer.host,peer.port,
- ((proto==self and '<') or '>'),
- their_peer.host,their_peer.port))
- while data:
- p,data=data[:16],data[16:]
- f.write(string.join(map(lambda x:'%02X'%ord(x),p),' ')+' ')
- f.write((16-len(p))*3*' ')
- for c in p:
- if len(repr(c))>3: f.write('.')
- else: f.write(c)
- f.write('\n')
- f.write('\n')
- f.close()
-
-
-
-class SOCKSv4Factory(protocol.Factory):
- """
- A factory for a SOCKSv4 proxy.
-
- Constructor accepts one argument, a log file name.
- """
-
- def __init__(self, log):
- self.logging = log
-
- def buildProtocol(self, addr):
- return SOCKSv4(self.logging, reactor)
-
-
-
-class SOCKSv4IncomingFactory(protocol.Factory):
- """
- A utility class for building protocols for incoming connections.
- """
-
- def __init__(self, socks, ip):
- self.socks = socks
- self.ip = ip
-
-
- def buildProtocol(self, addr):
- if addr[0] == self.ip:
- self.ip = ""
- self.socks.makeReply(90, 0)
- return SOCKSv4Incoming(self.socks)
- elif self.ip == "":
- return None
- else:
- self.socks.makeReply(91, 0)
- self.ip = ""
- return None
diff --git a/lib/python2.7/site-packages/Twisted-12.2.0-py2.7-linux-x86_64.egg/twisted/protocols/stateful.py b/lib/python2.7/site-packages/Twisted-12.2.0-py2.7-linux-x86_64.egg/twisted/protocols/stateful.py
deleted file mode 100755
index 7b82ae3d..00000000
--- a/lib/python2.7/site-packages/Twisted-12.2.0-py2.7-linux-x86_64.egg/twisted/protocols/stateful.py
+++ /dev/null
@@ -1,52 +0,0 @@
-# -*- test-case-name: twisted.test.test_stateful -*-
-
-# Copyright (c) Twisted Matrix Laboratories.
-# See LICENSE for details.
-
-
-from twisted.internet import protocol
-
-try:
- from cStringIO import StringIO
-except ImportError:
- from StringIO import StringIO
-
-class StatefulProtocol(protocol.Protocol):
- """A Protocol that stores state for you.
-
- state is a pair (function, num_bytes). When num_bytes bytes of data arrives
- from the network, function is called. It is expected to return the next
- state or None to keep same state. Initial state is returned by
- getInitialState (override it).
- """
- _sful_data = None, None, 0
-
- def makeConnection(self, transport):
- protocol.Protocol.makeConnection(self, transport)
- self._sful_data = self.getInitialState(), StringIO(), 0
-
- def getInitialState(self):
- raise NotImplementedError
-
- def dataReceived(self, data):
- state, buffer, offset = self._sful_data
- buffer.seek(0, 2)
- buffer.write(data)
- blen = buffer.tell() # how many bytes total is in the buffer
- buffer.seek(offset)
- while blen - offset >= state[1]:
- d = buffer.read(state[1])
- offset += state[1]
- next = state[0](d)
- if self.transport.disconnecting: # XXX: argh stupid hack borrowed right from LineReceiver
- return # dataReceived won't be called again, so who cares about consistent state
- if next:
- state = next
- if offset != 0:
- b = buffer.read()
- buffer.seek(0)
- buffer.truncate()
- buffer.write(b)
- offset = 0
- self._sful_data = state, buffer, offset
-
diff --git a/lib/python2.7/site-packages/Twisted-12.2.0-py2.7-linux-x86_64.egg/twisted/protocols/telnet.py b/lib/python2.7/site-packages/Twisted-12.2.0-py2.7-linux-x86_64.egg/twisted/protocols/telnet.py
deleted file mode 100755
index ba1c8263..00000000
--- a/lib/python2.7/site-packages/Twisted-12.2.0-py2.7-linux-x86_64.egg/twisted/protocols/telnet.py
+++ /dev/null
@@ -1,325 +0,0 @@
-
-# Copyright (c) Twisted Matrix Laboratories.
-# See LICENSE for details.
-
-
-"""TELNET implementation, with line-oriented command handling.
-"""
-
-import warnings
-warnings.warn(
- "As of Twisted 2.1, twisted.protocols.telnet is deprecated. "
- "See twisted.conch.telnet for the current, supported API.",
- DeprecationWarning,
- stacklevel=2)
-
-
-# System Imports
-try:
- from cStringIO import StringIO
-except ImportError:
- from StringIO import StringIO
-
-# Twisted Imports
-from twisted import copyright
-from twisted.internet import protocol
-
-# Some utility chars.
-ESC = chr(27) # ESC for doing fanciness
-BOLD_MODE_ON = ESC+"[1m" # turn bold on
-BOLD_MODE_OFF= ESC+"[m" # no char attributes
-
-
-# Characters gleaned from the various (and conflicting) RFCs. Not all of these are correct.
-
-NULL = chr(0) # No operation.
-LF = chr(10) # Moves the printer to the
- # next print line, keeping the
- # same horizontal position.
-CR = chr(13) # Moves the printer to the left
- # margin of the current line.
-BEL = chr(7) # Produces an audible or
- # visible signal (which does
- # NOT move the print head).
-BS = chr(8) # Moves the print head one
- # character position towards
- # the left margin.
-HT = chr(9) # Moves the printer to the
- # next horizontal tab stop.
- # It remains unspecified how
- # either party determines or
- # establishes where such tab
- # stops are located.
-VT = chr(11) # Moves the printer to the
- # next vertical tab stop. It
- # remains unspecified how
- # either party determines or
- # establishes where such tab
- # stops are located.
-FF = chr(12) # Moves the printer to the top
- # of the next page, keeping
- # the same horizontal position.
-SE = chr(240) # End of subnegotiation parameters.
-NOP= chr(241) # No operation.
-DM = chr(242) # "Data Mark": The data stream portion
- # of a Synch. This should always be
- # accompanied by a TCP Urgent
- # notification.
-BRK= chr(243) # NVT character Break.
-IP = chr(244) # The function Interrupt Process.
-AO = chr(245) # The function Abort Output
-AYT= chr(246) # The function Are You There.
-EC = chr(247) # The function Erase Character.
-EL = chr(248) # The function Erase Line
-GA = chr(249) # The Go Ahead signal.
-SB = chr(250) # Indicates that what follows is
- # subnegotiation of the indicated
- # option.
-WILL = chr(251) # Indicates the desire to begin
- # performing, or confirmation that
- # you are now performing, the
- # indicated option.
-WONT = chr(252) # Indicates the refusal to perform,
- # or continue performing, the
- # indicated option.
-DO = chr(253) # Indicates the request that the
- # other party perform, or
- # confirmation that you are expecting
- # the other party to perform, the
- # indicated option.
-DONT = chr(254) # Indicates the demand that the
- # other party stop performing,
- # or confirmation that you are no
- # longer expecting the other party
- # to perform, the indicated option.
-IAC = chr(255) # Data Byte 255.
-
-# features
-
-ECHO = chr(1) # User-to-Server: Asks the server to send
- # Echos of the transmitted data.
-
- # Server-to User: States that the server is
- # sending echos of the transmitted data.
- # Sent only as a reply to ECHO or NO ECHO.
-
-SUPGA = chr(3) # Suppress Go Ahead...? "Modern" telnet servers
- # are supposed to do this.
-
-LINEMODE = chr(34) # I don't care that Jon Postel is dead.
-
-HIDE = chr(133) # The intention is that a server will send
- # this signal to a user system which is
- # echoing locally (to the user) when the user
- # is about to type something secret (e.g. a
- # password). In this case, the user system
- # is to suppress local echoing or overprint
- # the input (or something) until the server
- # sends a NOECHO signal. In situations where
- # the user system is not echoing locally,
- # this signal must not be sent by the server.
-
-
-NOECHO= chr(131) # User-to-Server: Asks the server not to
- # return Echos of the transmitted data.
- #
- # Server-to-User: States that the server is
- # not sending echos of the transmitted data.
- # Sent only as a reply to ECHO or NO ECHO,
- # or to end hiding your input.
-
-
-
-iacBytes = {
- DO: 'DO',
- DONT: 'DONT',
- WILL: 'WILL',
- WONT: 'WONT',
- IP: 'IP'
- }
-
-def multireplace(st, dct):
- for k, v in dct.items():
- st = st.replace(k, v)
- return st
-
-class Telnet(protocol.Protocol):
- """I am a Protocol for handling Telnet connections. I have two
- sets of special methods, telnet_* and iac_*.
-
- telnet_* methods get called on every line sent to me. The method
- to call is decided by the current mode. The initial mode is 'User';
- this means that telnet_User is the first telnet_* method to be called.
- All telnet_* methods should return a string which specifies the mode
- to go into next; thus dictating which telnet_* method to call next.
- For example, the default telnet_User method returns 'Password' to go
- into Password mode, and the default telnet_Password method returns
- 'Command' to go into Command mode.
-
- The iac_* methods are less-used; they are called when an IAC telnet
- byte is received. You can define iac_DO, iac_DONT, iac_WILL, iac_WONT,
- and iac_IP methods to do what you want when one of these bytes is
- received."""
-
-
- gotIAC = 0
- iacByte = None
- lastLine = None
- buffer = ''
- echo = 0
- delimiters = ['\r\n', '\r\000']
- mode = "User"
-
- def write(self, data):
- """Send the given data over my transport."""
- self.transport.write(data)
-
-
- def connectionMade(self):
- """I will write a welcomeMessage and loginPrompt to the client."""
- self.write(self.welcomeMessage() + self.loginPrompt())
-
- def welcomeMessage(self):
- """Override me to return a string which will be sent to the client
- before login."""
- x = self.factory.__class__
- return ("\r\n" + x.__module__ + '.' + x.__name__ +
- '\r\nTwisted %s\r\n' % copyright.version
- )
-
- def loginPrompt(self):
- """Override me to return a 'login:'-type prompt."""
- return "username: "
-
- def iacSBchunk(self, chunk):
- pass
-
- def iac_DO(self, feature):
- pass
-
- def iac_DONT(self, feature):
- pass
-
- def iac_WILL(self, feature):
- pass
-
- def iac_WONT(self, feature):
- pass
-
- def iac_IP(self, feature):
- pass
-
- def processLine(self, line):
- """I call a method that looks like 'telnet_*' where '*' is filled
- in by the current mode. telnet_* methods should return a string which
- will become the new mode. If None is returned, the mode will not change.
- """
- mode = getattr(self, "telnet_"+self.mode)(line)
- if mode is not None:
- self.mode = mode
-
- def telnet_User(self, user):
- """I take a username, set it to the 'self.username' attribute,
- print out a password prompt, and switch to 'Password' mode. If
- you want to do something else when the username is received (ie,
- create a new user if the user doesn't exist), override me."""
- self.username = user
- self.write(IAC+WILL+ECHO+"password: ")
- return "Password"
-
- def telnet_Password(self, paswd):
- """I accept a password as an argument, and check it with the
- checkUserAndPass method. If the login is successful, I call
- loggedIn()."""
- self.write(IAC+WONT+ECHO+"*****\r\n")
- try:
- checked = self.checkUserAndPass(self.username, paswd)
- except:
- return "Done"
- if not checked:
- return "Done"
- self.loggedIn()
- return "Command"
-
- def telnet_Command(self, cmd):
- """The default 'command processing' mode. You probably want to
- override me."""
- return "Command"
-
- def processChunk(self, chunk):
- """I take a chunk of data and delegate out to telnet_* methods
- by way of processLine. If the current mode is 'Done', I'll close
- the connection. """
- self.buffer = self.buffer + chunk
-
- #yech.
- for delim in self.delimiters:
- idx = self.buffer.find(delim)
- if idx != -1:
- break
-
- while idx != -1:
- buf, self.buffer = self.buffer[:idx], self.buffer[idx+2:]
- self.processLine(buf)
- if self.mode == 'Done':
- self.transport.loseConnection()
-
- for delim in self.delimiters:
- idx = self.buffer.find(delim)
- if idx != -1:
- break
-
- def dataReceived(self, data):
- chunk = StringIO()
- # silly little IAC state-machine
- for char in data:
- if self.gotIAC:
- # working on an IAC request state
- if self.iacByte:
- # we're in SB mode, getting a chunk
- if self.iacByte == SB:
- if char == SE:
- self.iacSBchunk(chunk.getvalue())
- chunk = StringIO()
- del self.iacByte
- del self.gotIAC
- else:
- chunk.write(char)
- else:
- # got all I need to know state
- try:
- getattr(self, 'iac_%s' % iacBytes[self.iacByte])(char)
- except KeyError:
- pass
- del self.iacByte
- del self.gotIAC
- else:
- # got IAC, this is my W/W/D/D (or perhaps sb)
- self.iacByte = char
- elif char == IAC:
- # Process what I've got so far before going into
- # the IAC state; don't want to process characters
- # in an inconsistent state with what they were
- # received in.
- c = chunk.getvalue()
- if c:
- why = self.processChunk(c)
- if why:
- return why
- chunk = StringIO()
- self.gotIAC = 1
- else:
- chunk.write(char)
- # chunks are of a relatively indeterminate size.
- c = chunk.getvalue()
- if c:
- why = self.processChunk(c)
- if why:
- return why
-
- def loggedIn(self):
- """Called after the user successfully logged in.
-
- Override in subclasses.
- """
- pass
diff --git a/lib/python2.7/site-packages/Twisted-12.2.0-py2.7-linux-x86_64.egg/twisted/protocols/test/__init__.py b/lib/python2.7/site-packages/Twisted-12.2.0-py2.7-linux-x86_64.egg/twisted/protocols/test/__init__.py
deleted file mode 100755
index fd1e0588..00000000
--- a/lib/python2.7/site-packages/Twisted-12.2.0-py2.7-linux-x86_64.egg/twisted/protocols/test/__init__.py
+++ /dev/null
@@ -1,6 +0,0 @@
-# Copyright (c) Twisted Matrix Laboratories.
-# See LICENSE for details.
-
-"""
-Unit tests for L{twisted.protocols}.
-"""
diff --git a/lib/python2.7/site-packages/Twisted-12.2.0-py2.7-linux-x86_64.egg/twisted/protocols/test/test_tls.py b/lib/python2.7/site-packages/Twisted-12.2.0-py2.7-linux-x86_64.egg/twisted/protocols/test/test_tls.py
deleted file mode 100755
index 6227d02a..00000000
--- a/lib/python2.7/site-packages/Twisted-12.2.0-py2.7-linux-x86_64.egg/twisted/protocols/test/test_tls.py
+++ /dev/null
@@ -1,1499 +0,0 @@
-# Copyright (c) Twisted Matrix Laboratories.
-# See LICENSE for details.
-
-"""
-Tests for L{twisted.protocols.tls}.
-"""
-
-from zope.interface.verify import verifyObject
-from zope.interface import Interface, directlyProvides
-
-try:
- from twisted.protocols.tls import TLSMemoryBIOProtocol, TLSMemoryBIOFactory
- from twisted.protocols.tls import _PullToPush, _ProducerMembrane
-except ImportError:
- # Skip the whole test module if it can't be imported.
- skip = "pyOpenSSL 0.10 or newer required for twisted.protocols.tls"
-else:
- # Otherwise, the pyOpenSSL dependency must be satisfied, so all these
- # imports will work.
- from OpenSSL.crypto import X509Type
- from OpenSSL.SSL import TLSv1_METHOD, Error, Context, ConnectionType, WantReadError
- from twisted.internet.ssl import ClientContextFactory, PrivateCertificate
- from twisted.internet.ssl import DefaultOpenSSLContextFactory
-
-from twisted.python.filepath import FilePath
-from twisted.python.failure import Failure
-from twisted.python import log
-from twisted.internet.interfaces import ISystemHandle, ISSLTransport
-from twisted.internet.interfaces import IPushProducer
-from twisted.internet.error import ConnectionDone, ConnectionLost
-from twisted.internet.defer import Deferred, gatherResults
-from twisted.internet.protocol import Protocol, ClientFactory, ServerFactory
-from twisted.internet.task import TaskStopped
-from twisted.protocols.loopback import loopbackAsync, collapsingPumpPolicy
-from twisted.trial.unittest import TestCase
-from twisted.test.test_tcp import ConnectionLostNotifyingProtocol
-from twisted.test.test_ssl import certPath
-from twisted.test.proto_helpers import StringTransport
-
-
-class HandshakeCallbackContextFactory:
- """
- L{HandshakeCallbackContextFactory} is a factory for SSL contexts which
- allows applications to get notification when the SSL handshake completes.
-
- @ivar _finished: A L{Deferred} which will be called back when the handshake
- is done.
- """
- # pyOpenSSL needs to expose this.
- # https://bugs.launchpad.net/pyopenssl/+bug/372832
- SSL_CB_HANDSHAKE_DONE = 0x20
-
- def __init__(self):
- self._finished = Deferred()
-
-
- def factoryAndDeferred(cls):
- """
- Create a new L{HandshakeCallbackContextFactory} and return a two-tuple
- of it and a L{Deferred} which will fire when a connection created with
- it completes a TLS handshake.
- """
- contextFactory = cls()
- return contextFactory, contextFactory._finished
- factoryAndDeferred = classmethod(factoryAndDeferred)
-
-
- def _info(self, connection, where, ret):
- """
- This is the "info callback" on the context. It will be called
- periodically by pyOpenSSL with information about the state of a
- connection. When it indicates the handshake is complete, it will fire
- C{self._finished}.
- """
- if where & self.SSL_CB_HANDSHAKE_DONE:
- self._finished.callback(None)
-
-
- def getContext(self):
- """
- Create and return an SSL context configured to use L{self._info} as the
- info callback.
- """
- context = Context(TLSv1_METHOD)
- context.set_info_callback(self._info)
- return context
-
-
-
-class AccumulatingProtocol(Protocol):
- """
- A protocol which collects the bytes it receives and closes its connection
- after receiving a certain minimum of data.
-
- @ivar howMany: The number of bytes of data to wait for before closing the
- connection.
-
- @ivar received: A C{list} of C{str} of the bytes received so far.
- """
- def __init__(self, howMany):
- self.howMany = howMany
-
-
- def connectionMade(self):
- self.received = []
-
-
- def dataReceived(self, bytes):
- self.received.append(bytes)
- if sum(map(len, self.received)) >= self.howMany:
- self.transport.loseConnection()
-
-
- def connectionLost(self, reason):
- if not reason.check(ConnectionDone):
- log.err(reason)
-
-
-
-def buildTLSProtocol(server=False, transport=None):
- """
- Create a protocol hooked up to a TLS transport hooked up to a
- StringTransport.
- """
- # We want to accumulate bytes without disconnecting, so set high limit:
- clientProtocol = AccumulatingProtocol(999999999999)
- clientFactory = ClientFactory()
- clientFactory.protocol = lambda: clientProtocol
-
- if server:
- contextFactory = DefaultOpenSSLContextFactory(certPath, certPath)
- else:
- contextFactory = ClientContextFactory()
- wrapperFactory = TLSMemoryBIOFactory(
- contextFactory, not server, clientFactory)
- sslProtocol = wrapperFactory.buildProtocol(None)
-
- if transport is None:
- transport = StringTransport()
- sslProtocol.makeConnection(transport)
- return clientProtocol, sslProtocol
-
-
-
-class TLSMemoryBIOFactoryTests(TestCase):
- """
- Ensure TLSMemoryBIOFactory logging acts correctly.
- """
-
- def test_quiet(self):
- """
- L{TLSMemoryBIOFactory.doStart} and L{TLSMemoryBIOFactory.doStop} do
- not log any messages.
- """
- contextFactory = DefaultOpenSSLContextFactory(certPath, certPath)
-
- logs = []
- logger = logs.append
- log.addObserver(logger)
- self.addCleanup(log.removeObserver, logger)
- wrappedFactory = ServerFactory()
- # Disable logging on the wrapped factory:
- wrappedFactory.doStart = lambda: None
- wrappedFactory.doStop = lambda: None
- factory = TLSMemoryBIOFactory(contextFactory, False, wrappedFactory)
- factory.doStart()
- factory.doStop()
- self.assertEqual(logs, [])
-
-
- def test_logPrefix(self):
- """
- L{TLSMemoryBIOFactory.logPrefix} amends the wrapped factory's log prefix
- with a short string (C{"TLS"}) indicating the wrapping, rather than its
- full class name.
- """
- contextFactory = DefaultOpenSSLContextFactory(certPath, certPath)
- factory = TLSMemoryBIOFactory(contextFactory, False, ServerFactory())
- self.assertEqual("ServerFactory (TLS)", factory.logPrefix())
-
-
- def test_logPrefixFallback(self):
- """
- If the wrapped factory does not provide L{ILoggingContext},
- L{TLSMemoryBIOFactory.logPrefix} uses the wrapped factory's class name.
- """
- class NoFactory(object):
- pass
-
- contextFactory = DefaultOpenSSLContextFactory(certPath, certPath)
- factory = TLSMemoryBIOFactory(contextFactory, False, NoFactory())
- self.assertEqual("NoFactory (TLS)", factory.logPrefix())
-
-
-
-class TLSMemoryBIOTests(TestCase):
- """
- Tests for the implementation of L{ISSLTransport} which runs over another
- L{ITransport}.
- """
-
- def test_interfaces(self):
- """
- L{TLSMemoryBIOProtocol} instances provide L{ISSLTransport} and
- L{ISystemHandle}.
- """
- proto = TLSMemoryBIOProtocol(None, None)
- self.assertTrue(ISSLTransport.providedBy(proto))
- self.assertTrue(ISystemHandle.providedBy(proto))
-
-
- def test_wrappedProtocolInterfaces(self):
- """
- L{TLSMemoryBIOProtocol} instances provide the interfaces provided by
- the transport they wrap.
- """
- class ITransport(Interface):
- pass
-
- class MyTransport(object):
- def write(self, bytes):
- pass
-
- clientFactory = ClientFactory()
- contextFactory = ClientContextFactory()
- wrapperFactory = TLSMemoryBIOFactory(
- contextFactory, True, clientFactory)
-
- transport = MyTransport()
- directlyProvides(transport, ITransport)
- tlsProtocol = TLSMemoryBIOProtocol(wrapperFactory, Protocol())
- tlsProtocol.makeConnection(transport)
- self.assertTrue(ITransport.providedBy(tlsProtocol))
-
-
- def test_getHandle(self):
- """
- L{TLSMemoryBIOProtocol.getHandle} returns the L{OpenSSL.SSL.Connection}
- instance it uses to actually implement TLS.
-
- This may seem odd. In fact, it is. The L{OpenSSL.SSL.Connection} is
- not actually the "system handle" here, nor even an object the reactor
- knows about directly. However, L{twisted.internet.ssl.Certificate}'s
- C{peerFromTransport} and C{hostFromTransport} methods depend on being
- able to get an L{OpenSSL.SSL.Connection} object in order to work
- properly. Implementing L{ISystemHandle.getHandle} like this is the
- easiest way for those APIs to be made to work. If they are changed,
- then it may make sense to get rid of this implementation of
- L{ISystemHandle} and return the underlying socket instead.
- """
- factory = ClientFactory()
- contextFactory = ClientContextFactory()
- wrapperFactory = TLSMemoryBIOFactory(contextFactory, True, factory)
- proto = TLSMemoryBIOProtocol(wrapperFactory, Protocol())
- transport = StringTransport()
- proto.makeConnection(transport)
- self.assertIsInstance(proto.getHandle(), ConnectionType)
-
-
- def test_makeConnection(self):
- """
- When L{TLSMemoryBIOProtocol} is connected to a transport, it connects
- the protocol it wraps to a transport.
- """
- clientProtocol = Protocol()
- clientFactory = ClientFactory()
- clientFactory.protocol = lambda: clientProtocol
-
- contextFactory = ClientContextFactory()
- wrapperFactory = TLSMemoryBIOFactory(
- contextFactory, True, clientFactory)
- sslProtocol = wrapperFactory.buildProtocol(None)
-
- transport = StringTransport()
- sslProtocol.makeConnection(transport)
-
- self.assertNotIdentical(clientProtocol.transport, None)
- self.assertNotIdentical(clientProtocol.transport, transport)
- self.assertIdentical(clientProtocol.transport, sslProtocol)
-
-
- def handshakeProtocols(self):
- """
- Start handshake between TLS client and server.
- """
- clientFactory = ClientFactory()
- clientFactory.protocol = Protocol
-
- clientContextFactory, handshakeDeferred = (
- HandshakeCallbackContextFactory.factoryAndDeferred())
- wrapperFactory = TLSMemoryBIOFactory(
- clientContextFactory, True, clientFactory)
- sslClientProtocol = wrapperFactory.buildProtocol(None)
-
- serverFactory = ServerFactory()
- serverFactory.protocol = Protocol
-
- serverContextFactory = DefaultOpenSSLContextFactory(certPath, certPath)
- wrapperFactory = TLSMemoryBIOFactory(
- serverContextFactory, False, serverFactory)
- sslServerProtocol = wrapperFactory.buildProtocol(None)
-
- connectionDeferred = loopbackAsync(sslServerProtocol, sslClientProtocol)
- return (sslClientProtocol, sslServerProtocol, handshakeDeferred,
- connectionDeferred)
-
-
- def test_handshake(self):
- """
- The TLS handshake is performed when L{TLSMemoryBIOProtocol} is
- connected to a transport.
- """
- tlsClient, tlsServer, handshakeDeferred, _ = self.handshakeProtocols()
-
- # Only wait for the handshake to complete. Anything after that isn't
- # important here.
- return handshakeDeferred
-
-
- def test_handshakeFailure(self):
- """
- L{TLSMemoryBIOProtocol} reports errors in the handshake process to the
- application-level protocol object using its C{connectionLost} method
- and disconnects the underlying transport.
- """
- clientConnectionLost = Deferred()
- clientFactory = ClientFactory()
- clientFactory.protocol = (
- lambda: ConnectionLostNotifyingProtocol(
- clientConnectionLost))
-
- clientContextFactory = HandshakeCallbackContextFactory()
- wrapperFactory = TLSMemoryBIOFactory(
- clientContextFactory, True, clientFactory)
- sslClientProtocol = wrapperFactory.buildProtocol(None)
-
- serverConnectionLost = Deferred()
- serverFactory = ServerFactory()
- serverFactory.protocol = (
- lambda: ConnectionLostNotifyingProtocol(
- serverConnectionLost))
-
- # This context factory rejects any clients which do not present a
- # certificate.
- certificateData = FilePath(certPath).getContent()
- certificate = PrivateCertificate.loadPEM(certificateData)
- serverContextFactory = certificate.options(certificate)
- wrapperFactory = TLSMemoryBIOFactory(
- serverContextFactory, False, serverFactory)
- sslServerProtocol = wrapperFactory.buildProtocol(None)
-
- connectionDeferred = loopbackAsync(sslServerProtocol, sslClientProtocol)
-
- def cbConnectionLost(protocol):
- # The connection should close on its own in response to the error
- # induced by the client not supplying the required certificate.
- # After that, check to make sure the protocol's connectionLost was
- # called with the right thing.
- protocol.lostConnectionReason.trap(Error)
- clientConnectionLost.addCallback(cbConnectionLost)
- serverConnectionLost.addCallback(cbConnectionLost)
-
- # Additionally, the underlying transport should have been told to
- # go away.
- return gatherResults([
- clientConnectionLost, serverConnectionLost,
- connectionDeferred])
-
-
- def test_getPeerCertificate(self):
- """
- L{TLSMemoryBIOProtocol.getPeerCertificate} returns the
- L{OpenSSL.crypto.X509Type} instance representing the peer's
- certificate.
- """
- # Set up a client and server so there's a certificate to grab.
- clientFactory = ClientFactory()
- clientFactory.protocol = Protocol
-
- clientContextFactory, handshakeDeferred = (
- HandshakeCallbackContextFactory.factoryAndDeferred())
- wrapperFactory = TLSMemoryBIOFactory(
- clientContextFactory, True, clientFactory)
- sslClientProtocol = wrapperFactory.buildProtocol(None)
-
- serverFactory = ServerFactory()
- serverFactory.protocol = Protocol
-
- serverContextFactory = DefaultOpenSSLContextFactory(certPath, certPath)
- wrapperFactory = TLSMemoryBIOFactory(
- serverContextFactory, False, serverFactory)
- sslServerProtocol = wrapperFactory.buildProtocol(None)
-
- loopbackAsync(sslServerProtocol, sslClientProtocol)
-
- # Wait for the handshake
- def cbHandshook(ignored):
- # Grab the server's certificate and check it out
- cert = sslClientProtocol.getPeerCertificate()
- self.assertIsInstance(cert, X509Type)
- self.assertEqual(
- cert.digest('md5'),
- '9B:A4:AB:43:10:BE:82:AE:94:3E:6B:91:F2:F3:40:E8')
- handshakeDeferred.addCallback(cbHandshook)
- return handshakeDeferred
-
-
- def test_writeAfterHandshake(self):
- """
- Bytes written to L{TLSMemoryBIOProtocol} before the handshake is
- complete are received by the protocol on the other side of the
- connection once the handshake succeeds.
- """
- bytes = "some bytes"
-
- clientProtocol = Protocol()
- clientFactory = ClientFactory()
- clientFactory.protocol = lambda: clientProtocol
-
- clientContextFactory, handshakeDeferred = (
- HandshakeCallbackContextFactory.factoryAndDeferred())
- wrapperFactory = TLSMemoryBIOFactory(
- clientContextFactory, True, clientFactory)
- sslClientProtocol = wrapperFactory.buildProtocol(None)
-
- serverProtocol = AccumulatingProtocol(len(bytes))
- serverFactory = ServerFactory()
- serverFactory.protocol = lambda: serverProtocol
-
- serverContextFactory = DefaultOpenSSLContextFactory(certPath, certPath)
- wrapperFactory = TLSMemoryBIOFactory(
- serverContextFactory, False, serverFactory)
- sslServerProtocol = wrapperFactory.buildProtocol(None)
-
- connectionDeferred = loopbackAsync(sslServerProtocol, sslClientProtocol)
-
- # Wait for the handshake to finish before writing anything.
- def cbHandshook(ignored):
- clientProtocol.transport.write(bytes)
-
- # The server will drop the connection once it gets the bytes.
- return connectionDeferred
- handshakeDeferred.addCallback(cbHandshook)
-
- # Once the connection is lost, make sure the server received the
- # expected bytes.
- def cbDisconnected(ignored):
- self.assertEqual("".join(serverProtocol.received), bytes)
- handshakeDeferred.addCallback(cbDisconnected)
-
- return handshakeDeferred
-
-
- def writeBeforeHandshakeTest(self, sendingProtocol, bytes):
- """
- Run test where client sends data before handshake, given the sending
- protocol and expected bytes.
- """
- clientFactory = ClientFactory()
- clientFactory.protocol = sendingProtocol
-
- clientContextFactory, handshakeDeferred = (
- HandshakeCallbackContextFactory.factoryAndDeferred())
- wrapperFactory = TLSMemoryBIOFactory(
- clientContextFactory, True, clientFactory)
- sslClientProtocol = wrapperFactory.buildProtocol(None)
-
- serverProtocol = AccumulatingProtocol(len(bytes))
- serverFactory = ServerFactory()
- serverFactory.protocol = lambda: serverProtocol
-
- serverContextFactory = DefaultOpenSSLContextFactory(certPath, certPath)
- wrapperFactory = TLSMemoryBIOFactory(
- serverContextFactory, False, serverFactory)
- sslServerProtocol = wrapperFactory.buildProtocol(None)
-
- connectionDeferred = loopbackAsync(sslServerProtocol, sslClientProtocol)
-
- # Wait for the connection to end, then make sure the server received
- # the bytes sent by the client.
- def cbConnectionDone(ignored):
- self.assertEqual("".join(serverProtocol.received), bytes)
- connectionDeferred.addCallback(cbConnectionDone)
- return connectionDeferred
-
-
- def test_writeBeforeHandshake(self):
- """
- Bytes written to L{TLSMemoryBIOProtocol} before the handshake is
- complete are received by the protocol on the other side of the
- connection once the handshake succeeds.
- """
- bytes = "some bytes"
-
- class SimpleSendingProtocol(Protocol):
- def connectionMade(self):
- self.transport.write(bytes)
-
- return self.writeBeforeHandshakeTest(SimpleSendingProtocol, bytes)
-
-
- def test_writeSequence(self):
- """
- Bytes written to L{TLSMemoryBIOProtocol} with C{writeSequence} are
- received by the protocol on the other side of the connection.
- """
- bytes = "some bytes"
- class SimpleSendingProtocol(Protocol):
- def connectionMade(self):
- self.transport.writeSequence(list(bytes))
-
- return self.writeBeforeHandshakeTest(SimpleSendingProtocol, bytes)
-
-
- def test_writeAfterLoseConnection(self):
- """
- Bytes written to L{TLSMemoryBIOProtocol} after C{loseConnection} is
- called are not transmitted (unless there is a registered producer,
- which will be tested elsewhere).
- """
- bytes = "some bytes"
- class SimpleSendingProtocol(Protocol):
- def connectionMade(self):
- self.transport.write(bytes)
- self.transport.loseConnection()
- self.transport.write("hello")
- self.transport.writeSequence(["world"])
- return self.writeBeforeHandshakeTest(SimpleSendingProtocol, bytes)
-
-
- def test_multipleWrites(self):
- """
- If multiple separate TLS messages are received in a single chunk from
- the underlying transport, all of the application bytes from each
- message are delivered to the application-level protocol.
- """
- bytes = [str(i) for i in range(10)]
- class SimpleSendingProtocol(Protocol):
- def connectionMade(self):
- for b in bytes:
- self.transport.write(b)
-
- clientFactory = ClientFactory()
- clientFactory.protocol = SimpleSendingProtocol
-
- clientContextFactory = HandshakeCallbackContextFactory()
- wrapperFactory = TLSMemoryBIOFactory(
- clientContextFactory, True, clientFactory)
- sslClientProtocol = wrapperFactory.buildProtocol(None)
-
- serverProtocol = AccumulatingProtocol(sum(map(len, bytes)))
- serverFactory = ServerFactory()
- serverFactory.protocol = lambda: serverProtocol
-
- serverContextFactory = DefaultOpenSSLContextFactory(certPath, certPath)
- wrapperFactory = TLSMemoryBIOFactory(
- serverContextFactory, False, serverFactory)
- sslServerProtocol = wrapperFactory.buildProtocol(None)
-
- connectionDeferred = loopbackAsync(sslServerProtocol, sslClientProtocol, collapsingPumpPolicy)
-
- # Wait for the connection to end, then make sure the server received
- # the bytes sent by the client.
- def cbConnectionDone(ignored):
- self.assertEqual("".join(serverProtocol.received), ''.join(bytes))
- connectionDeferred.addCallback(cbConnectionDone)
- return connectionDeferred
-
-
- def test_hugeWrite(self):
- """
- If a very long string is passed to L{TLSMemoryBIOProtocol.write}, any
- trailing part of it which cannot be sent immediately is buffered and
- sent later.
- """
- bytes = "some bytes"
- factor = 8192
- class SimpleSendingProtocol(Protocol):
- def connectionMade(self):
- self.transport.write(bytes * factor)
-
- clientFactory = ClientFactory()
- clientFactory.protocol = SimpleSendingProtocol
-
- clientContextFactory = HandshakeCallbackContextFactory()
- wrapperFactory = TLSMemoryBIOFactory(
- clientContextFactory, True, clientFactory)
- sslClientProtocol = wrapperFactory.buildProtocol(None)
-
- serverProtocol = AccumulatingProtocol(len(bytes) * factor)
- serverFactory = ServerFactory()
- serverFactory.protocol = lambda: serverProtocol
-
- serverContextFactory = DefaultOpenSSLContextFactory(certPath, certPath)
- wrapperFactory = TLSMemoryBIOFactory(
- serverContextFactory, False, serverFactory)
- sslServerProtocol = wrapperFactory.buildProtocol(None)
-
- connectionDeferred = loopbackAsync(sslServerProtocol, sslClientProtocol)
-
- # Wait for the connection to end, then make sure the server received
- # the bytes sent by the client.
- def cbConnectionDone(ignored):
- self.assertEqual("".join(serverProtocol.received), bytes * factor)
- connectionDeferred.addCallback(cbConnectionDone)
- return connectionDeferred
-
-
- def test_disorderlyShutdown(self):
- """
- If a L{TLSMemoryBIOProtocol} loses its connection unexpectedly, this is
- reported to the application.
- """
- clientConnectionLost = Deferred()
- clientFactory = ClientFactory()
- clientFactory.protocol = (
- lambda: ConnectionLostNotifyingProtocol(
- clientConnectionLost))
-
- clientContextFactory = HandshakeCallbackContextFactory()
- wrapperFactory = TLSMemoryBIOFactory(
- clientContextFactory, True, clientFactory)
- sslClientProtocol = wrapperFactory.buildProtocol(None)
-
- # Client speaks first, so the server can be dumb.
- serverProtocol = Protocol()
-
- loopbackAsync(serverProtocol, sslClientProtocol)
-
- # Now destroy the connection.
- serverProtocol.transport.loseConnection()
-
- # And when the connection completely dies, check the reason.
- def cbDisconnected(clientProtocol):
- clientProtocol.lostConnectionReason.trap(Error)
- clientConnectionLost.addCallback(cbDisconnected)
- return clientConnectionLost
-
-
- def test_loseConnectionAfterHandshake(self):
- """
- L{TLSMemoryBIOProtocol.loseConnection} sends a TLS close alert and
- shuts down the underlying connection cleanly on both sides, after
- transmitting all buffered data.
- """
- class NotifyingProtocol(ConnectionLostNotifyingProtocol):
- def __init__(self, onConnectionLost):
- ConnectionLostNotifyingProtocol.__init__(self,
- onConnectionLost)
- self.data = []
-
- def dataReceived(self, bytes):
- self.data.append(bytes)
-
- clientConnectionLost = Deferred()
- clientFactory = ClientFactory()
- clientProtocol = NotifyingProtocol(clientConnectionLost)
- clientFactory.protocol = lambda: clientProtocol
-
- clientContextFactory, handshakeDeferred = (
- HandshakeCallbackContextFactory.factoryAndDeferred())
- wrapperFactory = TLSMemoryBIOFactory(
- clientContextFactory, True, clientFactory)
- sslClientProtocol = wrapperFactory.buildProtocol(None)
-
- serverConnectionLost = Deferred()
- serverProtocol = NotifyingProtocol(serverConnectionLost)
- serverFactory = ServerFactory()
- serverFactory.protocol = lambda: serverProtocol
-
- serverContextFactory = DefaultOpenSSLContextFactory(certPath, certPath)
- wrapperFactory = TLSMemoryBIOFactory(
- serverContextFactory, False, serverFactory)
- sslServerProtocol = wrapperFactory.buildProtocol(None)
-
- loopbackAsync(sslServerProtocol, sslClientProtocol)
- chunkOfBytes = "123456890" * 100000
-
- # Wait for the handshake before dropping the connection.
- def cbHandshake(ignored):
- # Write more than a single bio_read, to ensure client will still
- # have some data it needs to write when it receives the TLS close
- # alert, and that simply doing a single bio_read won't be
- # sufficient. Thus we will verify that any amount of buffered data
- # will be written out before the connection is closed, rather than
- # just small amounts that can be returned in a single bio_read:
- clientProtocol.transport.write(chunkOfBytes)
- serverProtocol.transport.loseConnection()
-
- # Now wait for the client and server to notice.
- return gatherResults([clientConnectionLost, serverConnectionLost])
- handshakeDeferred.addCallback(cbHandshake)
-
- # Wait for the connection to end, then make sure the client and server
- # weren't notified of a handshake failure that would cause the test to
- # fail.
- def cbConnectionDone((clientProtocol, serverProtocol)):
- clientProtocol.lostConnectionReason.trap(ConnectionDone)
- serverProtocol.lostConnectionReason.trap(ConnectionDone)
-
- # The server should have received all bytes sent by the client:
- self.assertEqual("".join(serverProtocol.data), chunkOfBytes)
-
- # The server should have closed its underlying transport, in
- # addition to whatever it did to shut down the TLS layer.
- self.assertTrue(serverProtocol.transport.q.disconnect)
-
- # The client should also have closed its underlying transport once
- # it saw the server shut down the TLS layer, so as to avoid relying
- # on the server to close the underlying connection.
- self.assertTrue(clientProtocol.transport.q.disconnect)
- handshakeDeferred.addCallback(cbConnectionDone)
- return handshakeDeferred
-
-
- def test_connectionLostOnlyAfterUnderlyingCloses(self):
- """
- The user protocol's connectionLost is only called when transport
- underlying TLS is disconnected.
- """
- class LostProtocol(Protocol):
- disconnected = None
- def connectionLost(self, reason):
- self.disconnected = reason
- wrapperFactory = TLSMemoryBIOFactory(ClientContextFactory(),
- True, ClientFactory())
- protocol = LostProtocol()
- tlsProtocol = TLSMemoryBIOProtocol(wrapperFactory, protocol)
- transport = StringTransport()
- tlsProtocol.makeConnection(transport)
-
- # Pretend TLS shutdown finished cleanly; the underlying transport
- # should be told to close, but the user protocol should not yet be
- # notified:
- tlsProtocol._tlsShutdownFinished(None)
- self.assertEqual(transport.disconnecting, True)
- self.assertEqual(protocol.disconnected, None)
-
- # Now close the underlying connection; the user protocol should be
- # notified with the given reason (since TLS closed cleanly):
- tlsProtocol.connectionLost(Failure(ConnectionLost("ono")))
- self.assertTrue(protocol.disconnected.check(ConnectionLost))
- self.assertEqual(protocol.disconnected.value.args, ("ono",))
-
-
- def test_loseConnectionTwice(self):
- """
- If TLSMemoryBIOProtocol.loseConnection is called multiple times, all
- but the first call have no effect.
- """
- wrapperFactory = TLSMemoryBIOFactory(ClientContextFactory(),
- True, ClientFactory())
- tlsProtocol = TLSMemoryBIOProtocol(wrapperFactory, Protocol())
- transport = StringTransport()
- tlsProtocol.makeConnection(transport)
- self.assertEqual(tlsProtocol.disconnecting, False)
-
- # Make sure loseConnection calls _shutdownTLS the first time (mostly
- # to make sure we're overriding it correctly):
- calls = []
- def _shutdownTLS(shutdown=tlsProtocol._shutdownTLS):
- calls.append(1)
- return shutdown()
- tlsProtocol._shutdownTLS = _shutdownTLS
- tlsProtocol.loseConnection()
- self.assertEqual(tlsProtocol.disconnecting, True)
- self.assertEqual(calls, [1])
-
- # Make sure _shutdownTLS isn't called a second time:
- tlsProtocol.loseConnection()
- self.assertEqual(calls, [1])
-
-
- def test_unexpectedEOF(self):
- """
- Unexpected disconnects get converted to ConnectionLost errors.
- """
- tlsClient, tlsServer, handshakeDeferred, disconnectDeferred = (
- self.handshakeProtocols())
- serverProtocol = tlsServer.wrappedProtocol
- data = []
- reason = []
- serverProtocol.dataReceived = data.append
- serverProtocol.connectionLost = reason.append
-
- # Write data, then disconnect *underlying* transport, resulting in an
- # unexpected TLS disconnect:
- def handshakeDone(ign):
- tlsClient.write("hello")
- tlsClient.transport.loseConnection()
- handshakeDeferred.addCallback(handshakeDone)
-
- # Receiver should be disconnected, with ConnectionLost notification
- # (masking the Unexpected EOF SSL error):
- def disconnected(ign):
- self.assertTrue(reason[0].check(ConnectionLost), reason[0])
- disconnectDeferred.addCallback(disconnected)
- return disconnectDeferred
-
-
- def test_errorWriting(self):
- """
- Errors while writing cause the protocols to be disconnected.
- """
- tlsClient, tlsServer, handshakeDeferred, disconnectDeferred = (
- self.handshakeProtocols())
- reason = []
- tlsClient.wrappedProtocol.connectionLost = reason.append
-
- # Pretend TLS connection is unhappy sending:
- class Wrapper(object):
- def __init__(self, wrapped):
- self._wrapped = wrapped
- def __getattr__(self, attr):
- return getattr(self._wrapped, attr)
- def send(self, *args):
- raise Error("ONO!")
- tlsClient._tlsConnection = Wrapper(tlsClient._tlsConnection)
-
- # Write some data:
- def handshakeDone(ign):
- tlsClient.write("hello")
- handshakeDeferred.addCallback(handshakeDone)
-
- # Failed writer should be disconnected with SSL error:
- def disconnected(ign):
- self.assertTrue(reason[0].check(Error), reason[0])
- disconnectDeferred.addCallback(disconnected)
- return disconnectDeferred
-
-
-
-class TLSProducerTests(TestCase):
- """
- The TLS transport must support the IConsumer interface.
- """
-
- def setupStreamingProducer(self, transport=None):
- class HistoryStringTransport(StringTransport):
- def __init__(self):
- StringTransport.__init__(self)
- self.producerHistory = []
-
- def pauseProducing(self):
- self.producerHistory.append("pause")
- StringTransport.pauseProducing(self)
-
- def resumeProducing(self):
- self.producerHistory.append("resume")
- StringTransport.resumeProducing(self)
-
- def stopProducing(self):
- self.producerHistory.append("stop")
- StringTransport.stopProducing(self)
-
- clientProtocol, tlsProtocol = buildTLSProtocol(transport=transport)
- producer = HistoryStringTransport()
- clientProtocol.transport.registerProducer(producer, True)
- self.assertEqual(tlsProtocol.transport.streaming, True)
- return clientProtocol, tlsProtocol, producer
-
-
- def flushTwoTLSProtocols(self, tlsProtocol, serverTLSProtocol):
- """
- Transfer bytes back and forth between two TLS protocols.
- """
- # We want to make sure all bytes are passed back and forth; JP
- # estimated that 3 rounds should be enough:
- for i in range(3):
- clientData = tlsProtocol.transport.value()
- if clientData:
- serverTLSProtocol.dataReceived(clientData)
- tlsProtocol.transport.clear()
- serverData = serverTLSProtocol.transport.value()
- if serverData:
- tlsProtocol.dataReceived(serverData)
- serverTLSProtocol.transport.clear()
- if not serverData and not clientData:
- break
- self.assertEqual(tlsProtocol.transport.value(), "")
- self.assertEqual(serverTLSProtocol.transport.value(), "")
-
-
- def test_streamingProducerPausedInNormalMode(self):
- """
- When the TLS transport is not blocked on reads, it correctly calls
- pauseProducing on the registered producer.
- """
- _, tlsProtocol, producer = self.setupStreamingProducer()
-
- # The TLS protocol's transport pretends to be full, pausing its
- # producer:
- tlsProtocol.transport.producer.pauseProducing()
- self.assertEqual(producer.producerState, 'paused')
- self.assertEqual(producer.producerHistory, ['pause'])
- self.assertEqual(tlsProtocol._producer._producerPaused, True)
-
-
- def test_streamingProducerResumedInNormalMode(self):
- """
- When the TLS transport is not blocked on reads, it correctly calls
- resumeProducing on the registered producer.
- """
- _, tlsProtocol, producer = self.setupStreamingProducer()
- tlsProtocol.transport.producer.pauseProducing()
- self.assertEqual(producer.producerHistory, ['pause'])
-
- # The TLS protocol's transport pretends to have written everything
- # out, so it resumes its producer:
- tlsProtocol.transport.producer.resumeProducing()
- self.assertEqual(producer.producerState, 'producing')
- self.assertEqual(producer.producerHistory, ['pause', 'resume'])
- self.assertEqual(tlsProtocol._producer._producerPaused, False)
-
-
- def test_streamingProducerPausedInWriteBlockedOnReadMode(self):
- """
- When the TLS transport is blocked on reads, it correctly calls
- pauseProducing on the registered producer.
- """
- clientProtocol, tlsProtocol, producer = self.setupStreamingProducer()
-
- # Write to TLS transport. Because we do this before the initial TLS
- # handshake is finished, writing bytes triggers a WantReadError,
- # indicating that until bytes are read for the handshake, more bytes
- # cannot be written. Thus writing bytes before the handshake should
- # cause the producer to be paused:
- clientProtocol.transport.write("hello")
- self.assertEqual(producer.producerState, 'paused')
- self.assertEqual(producer.producerHistory, ['pause'])
- self.assertEqual(tlsProtocol._producer._producerPaused, True)
-
-
- def test_streamingProducerResumedInWriteBlockedOnReadMode(self):
- """
- When the TLS transport is blocked on reads, it correctly calls
- resumeProducing on the registered producer.
- """
- clientProtocol, tlsProtocol, producer = self.setupStreamingProducer()
-
- # Write to TLS transport, triggering WantReadError; this should cause
- # the producer to be paused. We use a large chunk of data to make sure
- # large writes don't trigger multiple pauses:
- clientProtocol.transport.write("hello world" * 320000)
- self.assertEqual(producer.producerHistory, ['pause'])
-
- # Now deliver bytes that will fix the WantRead condition; this should
- # unpause the producer:
- serverProtocol, serverTLSProtocol = buildTLSProtocol(server=True)
- self.flushTwoTLSProtocols(tlsProtocol, serverTLSProtocol)
- self.assertEqual(producer.producerHistory, ['pause', 'resume'])
- self.assertEqual(tlsProtocol._producer._producerPaused, False)
-
- # Make sure we haven't disconnected for some reason:
- self.assertEqual(tlsProtocol.transport.disconnecting, False)
- self.assertEqual(producer.producerState, 'producing')
-
-
- def test_streamingProducerTwice(self):
- """
- Registering a streaming producer twice throws an exception.
- """
- clientProtocol, tlsProtocol, producer = self.setupStreamingProducer()
- originalProducer = tlsProtocol._producer
- producer2 = object()
- self.assertRaises(RuntimeError,
- clientProtocol.transport.registerProducer, producer2, True)
- self.assertIdentical(tlsProtocol._producer, originalProducer)
-
-
- def test_streamingProducerUnregister(self):
- """
- Unregistering a streaming producer removes it, reverting to initial state.
- """
- clientProtocol, tlsProtocol, producer = self.setupStreamingProducer()
- clientProtocol.transport.unregisterProducer()
- self.assertEqual(tlsProtocol._producer, None)
- self.assertEqual(tlsProtocol.transport.producer, None)
-
-
- def loseConnectionWithProducer(self, writeBlockedOnRead):
- """
- Common code for tests involving writes by producer after
- loseConnection is called.
- """
- clientProtocol, tlsProtocol, producer = self.setupStreamingProducer()
- serverProtocol, serverTLSProtocol = buildTLSProtocol(server=True)
-
- if not writeBlockedOnRead:
- # Do the initial handshake before write:
- self.flushTwoTLSProtocols(tlsProtocol, serverTLSProtocol)
- else:
- # In this case the write below will trigger write-blocked-on-read
- # condition...
- pass
-
- # Now write, then lose connection:
- clientProtocol.transport.write("x ")
- clientProtocol.transport.loseConnection()
- self.flushTwoTLSProtocols(tlsProtocol, serverTLSProtocol)
-
- # Underlying transport should not have loseConnection called yet, nor
- # should producer be stopped:
- self.assertEqual(tlsProtocol.transport.disconnecting, False)
- self.assertFalse("stop" in producer.producerHistory)
-
- # Writes from client to server should continue to go through, since we
- # haven't unregistered producer yet:
- clientProtocol.transport.write("hello")
- clientProtocol.transport.writeSequence([" ", "world"])
-
- # Unregister producer; this should trigger TLS shutdown:
- clientProtocol.transport.unregisterProducer()
- self.assertNotEqual(tlsProtocol.transport.value(), "")
- self.assertEqual(tlsProtocol.transport.disconnecting, False)
-
- # Additional writes should not go through:
- clientProtocol.transport.write("won't")
- clientProtocol.transport.writeSequence(["won't!"])
-
- # Finish TLS close handshake:
- self.flushTwoTLSProtocols(tlsProtocol, serverTLSProtocol)
- self.assertEqual(tlsProtocol.transport.disconnecting, True)
-
- # Bytes made it through, as long as they were written before producer
- # was unregistered:
- self.assertEqual("".join(serverProtocol.received), "x hello world")
-
-
- def test_streamingProducerLoseConnectionWithProducer(self):
- """
- loseConnection() waits for the producer to unregister itself, then
- does a clean TLS close alert, then closes the underlying connection.
- """
- return self.loseConnectionWithProducer(False)
-
-
- def test_streamingProducerLoseConnectionWithProducerWBOR(self):
- """
- Even when writes are blocked on reading, loseConnection() waits for
- the producer to unregister itself, then does a clean TLS close alert,
- then closes the underlying connection.
- """
- return self.loseConnectionWithProducer(True)
-
-
- def test_streamingProducerBothTransportsDecideToPause(self):
- """
- pauseProducing() events can come from both the TLS transport layer and
- the underlying transport. In this case, both decide to pause,
- underlying first.
- """
- class PausingStringTransport(StringTransport):
- _didPause = False
-
- def write(self, data):
- if not self._didPause and self.producer is not None:
- self._didPause = True
- self.producer.pauseProducing()
- StringTransport.write(self, data)
-
-
- class TLSConnection(object):
- def __init__(self):
- self.l = []
-
- def send(self, bytes):
- # on first write, don't send all bytes:
- if not self.l:
- bytes = bytes[:-1]
- # pause on second write:
- if len(self.l) == 1:
- self.l.append("paused")
- raise WantReadError()
- # otherwise just take in data:
- self.l.append(bytes)
- return len(bytes)
-
- def bio_write(self, data):
- pass
-
- def bio_read(self, size):
- return chr(ord('A') + len(self.l))
-
- def recv(self, size):
- raise WantReadError()
-
- transport = PausingStringTransport()
- clientProtocol, tlsProtocol, producer = self.setupStreamingProducer(
- transport)
- self.assertEqual(producer.producerState, 'producing')
-
- # Shove in fake TLSConnection that will raise WantReadError the second
-        # time send() is called. This will allow us to have bytes written
-        # to the PausingStringTransport, so it will pause the producer. Then,
- # WantReadError will be thrown, triggering the TLS transport's
- # producer code path.
- tlsProtocol._tlsConnection = TLSConnection()
- clientProtocol.transport.write("hello")
- self.assertEqual(producer.producerState, 'paused')
- self.assertEqual(producer.producerHistory, ['pause'])
-
- # Now, underlying transport resumes, and then we deliver some data to
- # TLS transport so that it will resume:
- tlsProtocol.transport.producer.resumeProducing()
- self.assertEqual(producer.producerState, 'producing')
- self.assertEqual(producer.producerHistory, ['pause', 'resume'])
- tlsProtocol.dataReceived("hello")
- self.assertEqual(producer.producerState, 'producing')
- self.assertEqual(producer.producerHistory, ['pause', 'resume'])
-
-
- def test_streamingProducerStopProducing(self):
- """
- If the underlying transport tells its producer to stopProducing(),
- this is passed on to the high-level producer.
- """
- _, tlsProtocol, producer = self.setupStreamingProducer()
- tlsProtocol.transport.producer.stopProducing()
- self.assertEqual(producer.producerState, 'stopped')
-
-
- def test_nonStreamingProducer(self):
- """
- Non-streaming producers get wrapped as streaming producers.
- """
- clientProtocol, tlsProtocol = buildTLSProtocol()
- producer = NonStreamingProducer(clientProtocol.transport)
-
- # Register non-streaming producer:
- clientProtocol.transport.registerProducer(producer, False)
- streamingProducer = tlsProtocol.transport.producer._producer
-
- # Verify it was wrapped into streaming producer:
- self.assertIsInstance(streamingProducer, _PullToPush)
- self.assertEqual(streamingProducer._producer, producer)
- self.assertEqual(streamingProducer._consumer, clientProtocol.transport)
- self.assertEqual(tlsProtocol.transport.streaming, True)
-
- # Verify the streaming producer was started, and ran until the end:
- def done(ignore):
- # Our own producer is done:
- self.assertEqual(producer.consumer, None)
- # The producer has been unregistered:
- self.assertEqual(tlsProtocol.transport.producer, None)
- # The streaming producer wrapper knows it's done:
- self.assertEqual(streamingProducer._finished, True)
- producer.result.addCallback(done)
-
- serverProtocol, serverTLSProtocol = buildTLSProtocol(server=True)
- self.flushTwoTLSProtocols(tlsProtocol, serverTLSProtocol)
- return producer.result
-
-
- def test_interface(self):
- """
- L{_ProducerMembrane} implements L{IPushProducer}.
- """
- producer = StringTransport()
- membrane = _ProducerMembrane(producer)
- self.assertTrue(verifyObject(IPushProducer, membrane))
-
-
- def registerProducerAfterConnectionLost(self, streaming):
- """
- If a producer is registered after the transport has disconnected, the
- producer is not used, and its stopProducing method is called.
- """
- clientProtocol, tlsProtocol = buildTLSProtocol()
- clientProtocol.connectionLost = lambda reason: reason.trap(Error)
-
- class Producer(object):
- stopped = False
-
- def resumeProducing(self):
- return 1/0 # this should never be called
-
- def stopProducing(self):
- self.stopped = True
-
- # Disconnect the transport:
- tlsProtocol.connectionLost(Failure(ConnectionDone()))
-
- # Register the producer; startProducing should not be called, but
- # stopProducing will:
- producer = Producer()
- tlsProtocol.registerProducer(producer, False)
- self.assertIdentical(tlsProtocol.transport.producer, None)
- self.assertEqual(producer.stopped, True)
-
-
- def test_streamingProducerAfterConnectionLost(self):
- """
- If a streaming producer is registered after the transport has
- disconnected, the producer is not used, and its stopProducing method
- is called.
- """
- self.registerProducerAfterConnectionLost(True)
-
-
- def test_nonStreamingProducerAfterConnectionLost(self):
- """
- If a non-streaming producer is registered after the transport has
- disconnected, the producer is not used, and its stopProducing method
- is called.
- """
- self.registerProducerAfterConnectionLost(False)
-
-
-
-class NonStreamingProducer(object):
- """
- A pull producer which writes 10 times only.
- """
-
- counter = 0
- stopped = False
-
- def __init__(self, consumer):
- self.consumer = consumer
- self.result = Deferred()
-
- def resumeProducing(self):
- if self.counter < 10:
- self.consumer.write(str(self.counter))
- self.counter += 1
- if self.counter == 10:
- self.consumer.unregisterProducer()
- self._done()
- else:
- if self.consumer is None:
- raise RuntimeError("BUG: resume after unregister/stop.")
-
-
- def pauseProducing(self):
- raise RuntimeError("BUG: pause should never be called.")
-
-
- def _done(self):
- self.consumer = None
- d = self.result
- del self.result
- d.callback(None)
-
-
- def stopProducing(self):
- self.stopped = True
- self._done()
-
-
-
-class NonStreamingProducerTests(TestCase):
- """
- Non-streaming producers can be adapted into being streaming producers.
- """
-
- def streamUntilEnd(self, consumer):
- """
-        Verify that the producer writes out all its data to the consumer, but
-        is not called again after that.
- """
- nsProducer = NonStreamingProducer(consumer)
- streamingProducer = _PullToPush(nsProducer, consumer)
- consumer.registerProducer(streamingProducer, True)
-
- # The producer will call unregisterProducer(), and we need to hook
- # that up so the streaming wrapper is notified; the
- # TLSMemoryBIOProtocol will have to do this itself, which is tested
- # elsewhere:
- def unregister(orig=consumer.unregisterProducer):
- orig()
- streamingProducer.stopStreaming()
- consumer.unregisterProducer = unregister
-
- done = nsProducer.result
- def doneStreaming(_):
- # All data was streamed, and the producer unregistered itself:
- self.assertEqual(consumer.value(), "0123456789")
- self.assertEqual(consumer.producer, None)
- # And the streaming wrapper stopped:
- self.assertEqual(streamingProducer._finished, True)
- done.addCallback(doneStreaming)
-
- # Now, start streaming:
- streamingProducer.startStreaming()
- return done
-
-
- def test_writeUntilDone(self):
- """
- When converted to a streaming producer, the non-streaming producer
- writes out all its data, but is not called after that.
- """
- consumer = StringTransport()
- return self.streamUntilEnd(consumer)
-
-
- def test_pause(self):
- """
- When the streaming producer is paused, the underlying producer stops
- getting resumeProducing calls.
- """
- class PausingStringTransport(StringTransport):
- writes = 0
-
- def __init__(self):
- StringTransport.__init__(self)
- self.paused = Deferred()
-
- def write(self, data):
- self.writes += 1
- StringTransport.write(self, data)
- if self.writes == 3:
- self.producer.pauseProducing()
- d = self.paused
- del self.paused
- d.callback(None)
-
-
- consumer = PausingStringTransport()
- nsProducer = NonStreamingProducer(consumer)
- streamingProducer = _PullToPush(nsProducer, consumer)
- consumer.registerProducer(streamingProducer, True)
-
- # Make sure the consumer does not continue:
- def shouldNotBeCalled(ignore):
- self.fail("BUG: The producer should not finish!")
- nsProducer.result.addCallback(shouldNotBeCalled)
-
- done = consumer.paused
- def paused(ignore):
- # The CooperatorTask driving the producer was paused:
- self.assertEqual(streamingProducer._coopTask._pauseCount, 1)
- done.addCallback(paused)
-
- # Now, start streaming:
- streamingProducer.startStreaming()
- return done
-
-
- def test_resume(self):
- """
- When the streaming producer is paused and then resumed, the underlying
- producer starts getting resumeProducing calls again after the resume.
-
-        The test will never finish (or rather, it will time out) if the
-        resumeProducing call is not working.
- """
- class PausingStringTransport(StringTransport):
- writes = 0
-
- def write(self, data):
- self.writes += 1
- StringTransport.write(self, data)
- if self.writes == 3:
- self.producer.pauseProducing()
- self.producer.resumeProducing()
-
- consumer = PausingStringTransport()
- return self.streamUntilEnd(consumer)
-
-
- def test_stopProducing(self):
- """
- When the streaming producer is stopped by the consumer, the underlying
- producer is stopped, and streaming is stopped.
- """
- class StoppingStringTransport(StringTransport):
- writes = 0
-
- def write(self, data):
- self.writes += 1
- StringTransport.write(self, data)
- if self.writes == 3:
- self.producer.stopProducing()
-
- consumer = StoppingStringTransport()
- nsProducer = NonStreamingProducer(consumer)
- streamingProducer = _PullToPush(nsProducer, consumer)
- consumer.registerProducer(streamingProducer, True)
-
- done = nsProducer.result
- def doneStreaming(_):
- # Not all data was streamed, and the producer was stopped:
- self.assertEqual(consumer.value(), "012")
- self.assertEqual(nsProducer.stopped, True)
- # And the streaming wrapper stopped:
- self.assertEqual(streamingProducer._finished, True)
- done.addCallback(doneStreaming)
-
- # Now, start streaming:
- streamingProducer.startStreaming()
- return done
-
-
- def resumeProducingRaises(self, consumer, expectedExceptions):
- """
- Common implementation for tests where the underlying producer throws
- an exception when its resumeProducing is called.
- """
- class ThrowingProducer(NonStreamingProducer):
-
- def resumeProducing(self):
- if self.counter == 2:
- return 1/0
- else:
- NonStreamingProducer.resumeProducing(self)
-
- nsProducer = ThrowingProducer(consumer)
- streamingProducer = _PullToPush(nsProducer, consumer)
- consumer.registerProducer(streamingProducer, True)
-
- # Register log observer:
- loggedMsgs = []
- log.addObserver(loggedMsgs.append)
- self.addCleanup(log.removeObserver, loggedMsgs.append)
-
- # Make consumer unregister do what TLSMemoryBIOProtocol would do:
- def unregister(orig=consumer.unregisterProducer):
- orig()
- streamingProducer.stopStreaming()
- consumer.unregisterProducer = unregister
-
- # Start streaming:
- streamingProducer.startStreaming()
-
- done = streamingProducer._coopTask.whenDone()
- done.addErrback(lambda reason: reason.trap(TaskStopped))
- def stopped(ign):
- self.assertEqual(consumer.value(), "01")
- # Any errors from resumeProducing were logged:
- errors = self.flushLoggedErrors()
- self.assertEqual(len(errors), len(expectedExceptions))
- for f, (expected, msg), logMsg in zip(
- errors, expectedExceptions, loggedMsgs):
- self.assertTrue(f.check(expected))
- self.assertIn(msg, logMsg['why'])
- # And the streaming wrapper stopped:
- self.assertEqual(streamingProducer._finished, True)
- done.addCallback(stopped)
- return done
-
-
- def test_resumeProducingRaises(self):
- """
- If the underlying producer raises an exception when resumeProducing is
- called, the streaming wrapper should log the error, unregister from
- the consumer and stop streaming.
- """
- consumer = StringTransport()
- done = self.resumeProducingRaises(
- consumer,
- [(ZeroDivisionError, "failed, producing will be stopped")])
- def cleanShutdown(ignore):
- # Producer was unregistered from consumer:
- self.assertEqual(consumer.producer, None)
- done.addCallback(cleanShutdown)
- return done
-
-
- def test_resumeProducingRaiseAndUnregisterProducerRaises(self):
- """
- If the underlying producer raises an exception when resumeProducing is
- called, the streaming wrapper should log the error, unregister from
- the consumer and stop streaming even if the unregisterProducer call
-        also raises.
- """
- consumer = StringTransport()
- def raiser():
- raise RuntimeError()
- consumer.unregisterProducer = raiser
- return self.resumeProducingRaises(
- consumer,
- [(ZeroDivisionError, "failed, producing will be stopped"),
- (RuntimeError, "failed to unregister producer")])
-
-
- def test_stopStreamingTwice(self):
- """
- stopStreaming() can be called more than once without blowing
- up. This is useful for error-handling paths.
- """
- consumer = StringTransport()
- nsProducer = NonStreamingProducer(consumer)
- streamingProducer = _PullToPush(nsProducer, consumer)
- streamingProducer.startStreaming()
- streamingProducer.stopStreaming()
- streamingProducer.stopStreaming()
- self.assertEqual(streamingProducer._finished, True)
-
-
- def test_interface(self):
- """
- L{_PullToPush} implements L{IPushProducer}.
- """
- consumer = StringTransport()
- nsProducer = NonStreamingProducer(consumer)
- streamingProducer = _PullToPush(nsProducer, consumer)
- self.assertTrue(verifyObject(IPushProducer, streamingProducer))
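A minimal sketch of the consumer-side contract these tests exercise, reusing the NonStreamingProducer defined above with a StringTransport consumer (the same arrangement streamUntilEnd uses)::

    from twisted.test.proto_helpers import StringTransport
    from twisted.protocols.tls import _PullToPush

    consumer = StringTransport()
    producer = NonStreamingProducer(consumer)   # the pull producer defined above
    adapter = _PullToPush(producer, consumer)

    # Register the adapter as a *streaming* producer and start it; the
    # cooperator then drives producer.resumeProducing() until it finishes.
    consumer.registerProducer(adapter, True)
    adapter.startStreaming()

    # Whenever the consumer's unregisterProducer() runs, it must also call
    # adapter.stopStreaming(); TLSMemoryBIOProtocol.unregisterProducer does
    # this for real TLS transports, and the tests above patch StringTransport
    # to do the same.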
diff --git a/lib/python2.7/site-packages/Twisted-12.2.0-py2.7-linux-x86_64.egg/twisted/protocols/tls.py b/lib/python2.7/site-packages/Twisted-12.2.0-py2.7-linux-x86_64.egg/twisted/protocols/tls.py
deleted file mode 100755
index a1b782f8..00000000
--- a/lib/python2.7/site-packages/Twisted-12.2.0-py2.7-linux-x86_64.egg/twisted/protocols/tls.py
+++ /dev/null
@@ -1,613 +0,0 @@
-# -*- test-case-name: twisted.protocols.test.test_tls,twisted.internet.test.test_tls,twisted.test.test_sslverify -*-
-# Copyright (c) Twisted Matrix Laboratories.
-# See LICENSE for details.
-
-"""
-Implementation of a TLS transport (L{ISSLTransport}) as an
-L{IProtocol<twisted.internet.interfaces.IProtocol>} layered on top of any
-L{ITransport<twisted.internet.interfaces.ITransport>} implementation, based on
-U{OpenSSL<http://www.openssl.org>}'s memory BIO features.
-
-L{TLSMemoryBIOFactory} is a L{WrappingFactory} which wraps protocols created by
-the factory it wraps with L{TLSMemoryBIOProtocol}. L{TLSMemoryBIOProtocol}
-intercedes between the underlying transport and the wrapped protocol to
-implement SSL and TLS. Typical usage of this module looks like this::
-
- from twisted.protocols.tls import TLSMemoryBIOFactory
- from twisted.internet.protocol import ServerFactory
- from twisted.internet.ssl import PrivateCertificate
- from twisted.internet import reactor
-
- from someapplication import ApplicationProtocol
-
- serverFactory = ServerFactory()
- serverFactory.protocol = ApplicationProtocol
- certificate = PrivateCertificate.loadPEM(certPEMData)
- contextFactory = certificate.options()
- tlsFactory = TLSMemoryBIOFactory(contextFactory, False, serverFactory)
- reactor.listenTCP(12345, tlsFactory)
- reactor.run()
-
-This API offers somewhat more flexibility than
-L{twisted.internet.interfaces.IReactorSSL}; for example, a L{TLSMemoryBIOProtocol}
-instance can use another instance of L{TLSMemoryBIOProtocol} as its transport,
-yielding TLS over TLS - useful to implement onion routing. It can also be used
-to run TLS over unusual transports, such as UNIX sockets and stdio.
-"""
-
-
-from OpenSSL.SSL import Error, ZeroReturnError, WantReadError
-from OpenSSL.SSL import TLSv1_METHOD, Context, Connection
-
-try:
- Connection(Context(TLSv1_METHOD), None)
-except TypeError, e:
- if str(e) != "argument must be an int, or have a fileno() method.":
- raise
- raise ImportError("twisted.protocols.tls requires pyOpenSSL 0.10 or newer.")
-
-from zope.interface import implements, providedBy, directlyProvides
-
-from twisted.python.failure import Failure
-from twisted.python import log
-from twisted.python.reflect import safe_str
-from twisted.internet.interfaces import ISystemHandle, ISSLTransport
-from twisted.internet.interfaces import IPushProducer, ILoggingContext
-from twisted.internet.main import CONNECTION_LOST
-from twisted.internet.protocol import Protocol
-from twisted.internet.task import cooperate
-from twisted.protocols.policies import ProtocolWrapper, WrappingFactory
-
-
-class _PullToPush(object):
- """
- An adapter that converts a non-streaming to a streaming producer.
-
- Because of limitations of the producer API, this adapter requires the
- cooperation of the consumer. When the consumer's C{registerProducer} is
- called with a non-streaming producer, it must wrap it with L{_PullToPush}
- and then call C{startStreaming} on the resulting object. When the
- consumer's C{unregisterProducer} is called, it must call
- C{stopStreaming} on the L{_PullToPush} instance.
-
- If the underlying producer throws an exception from C{resumeProducing},
- the producer will be unregistered from the consumer.
-
-    @ivar _producer: the underlying non-streaming producer.
-
- @ivar _consumer: the consumer with which the underlying producer was
- registered.
-
- @ivar _finished: C{bool} indicating whether the producer has finished.
-
- @ivar _coopTask: the result of calling L{cooperate}, the task driving the
- streaming producer.
- """
- implements(IPushProducer)
-
- _finished = False
-
-
- def __init__(self, pullProducer, consumer):
- self._producer = pullProducer
- self._consumer = consumer
-
-
- def _pull(self):
- """
- A generator that calls C{resumeProducing} on the underlying producer
- forever.
-
- If C{resumeProducing} throws an exception, the producer is
- unregistered, which should result in streaming stopping.
- """
- while True:
- try:
- self._producer.resumeProducing()
- except:
- log.err(None, "%s failed, producing will be stopped:" %
- (safe_str(self._producer),))
- try:
- self._consumer.unregisterProducer()
- # The consumer should now call stopStreaming() on us,
- # thus stopping the streaming.
- except:
- # Since the consumer blew up, we may not have had
- # stopStreaming() called, so we just stop on our own:
- log.err(None, "%s failed to unregister producer:" %
- (safe_str(self._consumer),))
- self._finished = True
- return
- yield None
-
-
- def startStreaming(self):
- """
- This should be called by the consumer when the producer is registered.
-
- Start streaming data to the consumer.
- """
- self._coopTask = cooperate(self._pull())
-
-
- def stopStreaming(self):
- """
- This should be called by the consumer when the producer is unregistered.
-
- Stop streaming data to the consumer.
- """
- if self._finished:
- return
- self._finished = True
- self._coopTask.stop()
-
-
- # IPushProducer implementation:
- def pauseProducing(self):
- self._coopTask.pause()
-
-
- def resumeProducing(self):
- self._coopTask.resume()
-
-
- def stopProducing(self):
- self.stopStreaming()
- self._producer.stopProducing()
-
-
-
-class _ProducerMembrane(object):
- """
- Stand-in for producer registered with a L{TLSMemoryBIOProtocol} transport.
-
-    Ensures that producer pause/resume events from the underlying transport are
- coordinated with pause/resume events from the TLS layer.
-
- @ivar _producer: The application-layer producer.
- """
- implements(IPushProducer)
-
- _producerPaused = False
-
- def __init__(self, producer):
- self._producer = producer
-
-
- def pauseProducing(self):
- """
- C{pauseProducing} the underlying producer, if it's not paused.
- """
- if self._producerPaused:
- return
- self._producerPaused = True
- self._producer.pauseProducing()
-
-
- def resumeProducing(self):
- """
- C{resumeProducing} the underlying producer, if it's paused.
- """
- if not self._producerPaused:
- return
- self._producerPaused = False
- self._producer.resumeProducing()
-
-
- def stopProducing(self):
- """
- C{stopProducing} the underlying producer.
-
- There is only a single source for this event, so it's simply passed
- on.
- """
- self._producer.stopProducing()
-
-
-
-class TLSMemoryBIOProtocol(ProtocolWrapper):
- """
- L{TLSMemoryBIOProtocol} is a protocol wrapper which uses OpenSSL via a
- memory BIO to encrypt bytes written to it before sending them on to the
- underlying transport and decrypts bytes received from the underlying
- transport before delivering them to the wrapped protocol.
-
- In addition to producer events from the underlying transport, the need to
- wait for reads before a write can proceed means the
- L{TLSMemoryBIOProtocol} may also want to pause a producer. Pause/resume
- events are therefore merged using the L{_ProducerMembrane}
- wrapper. Non-streaming (pull) producers are supported by wrapping them
- with L{_PullToPush}.
-
- @ivar _tlsConnection: The L{OpenSSL.SSL.Connection} instance which is
-        encrypting and decrypting this connection.
-
- @ivar _lostTLSConnection: A flag indicating whether connection loss has
- already been dealt with (C{True}) or not (C{False}). TLS disconnection
- is distinct from the underlying connection being lost.
-
- @ivar _writeBlockedOnRead: A flag indicating whether further writing must
- wait for data to be received (C{True}) or not (C{False}).
-
- @ivar _appSendBuffer: A C{list} of C{str} of application-level (cleartext)
- data which is waiting for C{_writeBlockedOnRead} to be reset to
- C{False} so it can be passed to and perhaps accepted by
- C{_tlsConnection.send}.
-
- @ivar _connectWrapped: A flag indicating whether or not to call
- C{makeConnection} on the wrapped protocol. This is for the reactor's
- L{twisted.internet.interfaces.ITLSTransport.startTLS} implementation,
- since it has a protocol which it has already called C{makeConnection}
- on, and which has no interest in a new transport. See #3821.
-
- @ivar _handshakeDone: A flag indicating whether or not the handshake is
- known to have completed successfully (C{True}) or not (C{False}). This
- is used to control error reporting behavior. If the handshake has not
- completed, the underlying L{OpenSSL.SSL.Error} will be passed to the
- application's C{connectionLost} method. If it has completed, any
- unexpected L{OpenSSL.SSL.Error} will be turned into a
- L{ConnectionLost}. This is weird; however, it is simply an attempt at
- a faithful re-implementation of the behavior provided by
- L{twisted.internet.ssl}.
-
- @ivar _reason: If an unexpected L{OpenSSL.SSL.Error} occurs which causes
- the connection to be lost, it is saved here. If appropriate, this may
- be used as the reason passed to the application protocol's
- C{connectionLost} method.
-
- @ivar _producer: The current producer registered via C{registerProducer},
- or C{None} if no producer has been registered or a previous one was
- unregistered.
- """
- implements(ISystemHandle, ISSLTransport)
-
- _reason = None
- _handshakeDone = False
- _lostTLSConnection = False
- _writeBlockedOnRead = False
- _producer = None
-
- def __init__(self, factory, wrappedProtocol, _connectWrapped=True):
- ProtocolWrapper.__init__(self, factory, wrappedProtocol)
- self._connectWrapped = _connectWrapped
-
-
- def getHandle(self):
- """
- Return the L{OpenSSL.SSL.Connection} object being used to encrypt and
- decrypt this connection.
-
- This is done for the benefit of L{twisted.internet.ssl.Certificate}'s
- C{peerFromTransport} and C{hostFromTransport} methods only. A
- different system handle may be returned by future versions of this
- method.
- """
- return self._tlsConnection
-
-
- def makeConnection(self, transport):
- """
- Connect this wrapper to the given transport and initialize the
- necessary L{OpenSSL.SSL.Connection} with a memory BIO.
- """
- tlsContext = self.factory._contextFactory.getContext()
- self._tlsConnection = Connection(tlsContext, None)
- if self.factory._isClient:
- self._tlsConnection.set_connect_state()
- else:
- self._tlsConnection.set_accept_state()
- self._appSendBuffer = []
-
- # Add interfaces provided by the transport we are wrapping:
- for interface in providedBy(transport):
- directlyProvides(self, interface)
-
- # Intentionally skip ProtocolWrapper.makeConnection - it might call
- # wrappedProtocol.makeConnection, which we want to make conditional.
- Protocol.makeConnection(self, transport)
- self.factory.registerProtocol(self)
- if self._connectWrapped:
- # Now that the TLS layer is initialized, notify the application of
- # the connection.
- ProtocolWrapper.makeConnection(self, transport)
-
- # Now that we ourselves have a transport (initialized by the
- # ProtocolWrapper.makeConnection call above), kick off the TLS
- # handshake.
- try:
- self._tlsConnection.do_handshake()
- except WantReadError:
- # This is the expected case - there's no data in the connection's
- # input buffer yet, so it won't be able to complete the whole
- # handshake now. If this is the speak-first side of the
- # connection, then some bytes will be in the send buffer now; flush
- # them.
- self._flushSendBIO()
-
-
- def _flushSendBIO(self):
- """
- Read any bytes out of the send BIO and write them to the underlying
- transport.
- """
- try:
- bytes = self._tlsConnection.bio_read(2 ** 15)
- except WantReadError:
- # There may be nothing in the send BIO right now.
- pass
- else:
- self.transport.write(bytes)
-
-
- def _flushReceiveBIO(self):
- """
- Try to receive any application-level bytes which are now available
- because of a previous write into the receive BIO. This will take
- care of delivering any application-level bytes which are received to
- the protocol, as well as handling of the various exceptions which
- can come from trying to get such bytes.
- """
- # Keep trying this until an error indicates we should stop or we
- # close the connection. Looping is necessary to make sure we
- # process all of the data which was put into the receive BIO, as
- # there is no guarantee that a single recv call will do it all.
- while not self._lostTLSConnection:
- try:
- bytes = self._tlsConnection.recv(2 ** 15)
- except WantReadError:
- # The newly received bytes might not have been enough to produce
- # any application data.
- break
- except ZeroReturnError:
- # TLS has shut down and no more TLS data will be received over
- # this connection.
- self._shutdownTLS()
-                # Passing in None means the user protocol's connectionLost
-                # will get called with the reason from the underlying transport:
- self._tlsShutdownFinished(None)
- except Error, e:
- # Something went pretty wrong. For example, this might be a
- # handshake failure (because there were no shared ciphers, because
- # a certificate failed to verify, etc). TLS can no longer proceed.
-
- # Squash EOF in violation of protocol into ConnectionLost; we
-                # create Failure before calling _flushSendBIO so that no new
- # exception will get thrown in the interim.
- if e.args[0] == -1 and e.args[1] == 'Unexpected EOF':
- failure = Failure(CONNECTION_LOST)
- else:
- failure = Failure()
-
- self._flushSendBIO()
- self._tlsShutdownFinished(failure)
- else:
- # If we got application bytes, the handshake must be done by
- # now. Keep track of this to control error reporting later.
- self._handshakeDone = True
- ProtocolWrapper.dataReceived(self, bytes)
-
- # The received bytes might have generated a response which needs to be
- # sent now. For example, the handshake involves several round-trip
-        # exchanges without ever producing application bytes.
- self._flushSendBIO()
-
-
- def dataReceived(self, bytes):
- """
- Deliver any received bytes to the receive BIO and then read and deliver
- to the application any application-level data which becomes available
- as a result of this.
- """
- self._tlsConnection.bio_write(bytes)
-
- if self._writeBlockedOnRead:
- # A read just happened, so we might not be blocked anymore. Try to
- # flush all the pending application bytes.
- self._writeBlockedOnRead = False
- appSendBuffer = self._appSendBuffer
- self._appSendBuffer = []
- for bytes in appSendBuffer:
- self._write(bytes)
- if (not self._writeBlockedOnRead and self.disconnecting and
- self.producer is None):
- self._shutdownTLS()
- if self._producer is not None:
- self._producer.resumeProducing()
-
- self._flushReceiveBIO()
-
-
- def _shutdownTLS(self):
- """
- Initiate, or reply to, the shutdown handshake of the TLS layer.
- """
- shutdownSuccess = self._tlsConnection.shutdown()
- self._flushSendBIO()
- if shutdownSuccess:
- # Both sides have shutdown, so we can start closing lower-level
- # transport. This will also happen if we haven't started
- # negotiation at all yet, in which case shutdown succeeds
- # immediately.
- self.transport.loseConnection()
-
-
- def _tlsShutdownFinished(self, reason):
- """
- Called when TLS connection has gone away; tell underlying transport to
- disconnect.
- """
- self._reason = reason
- self._lostTLSConnection = True
- # Using loseConnection causes the application protocol's
- # connectionLost method to be invoked non-reentrantly, which is always
- # a nice feature. However, for error cases (reason != None) we might
- # want to use abortConnection when it becomes available. The
- # loseConnection call is basically tested by test_handshakeFailure.
- # At least one side will need to do it or the test never finishes.
- self.transport.loseConnection()
-
-
- def connectionLost(self, reason):
- """
- Handle the possible repetition of calls to this method (due to either
- the underlying transport going away or due to an error at the TLS
- layer) and make sure the base implementation only gets invoked once.
- """
- if not self._lostTLSConnection:
- # Tell the TLS connection that it's not going to get any more data
- # and give it a chance to finish reading.
- self._tlsConnection.bio_shutdown()
- self._flushReceiveBIO()
- self._lostTLSConnection = True
- reason = self._reason or reason
- self._reason = None
- ProtocolWrapper.connectionLost(self, reason)
-
-
- def loseConnection(self):
- """
- Send a TLS close alert and close the underlying connection.
- """
- if self.disconnecting:
- return
- self.disconnecting = True
- if not self._writeBlockedOnRead and self._producer is None:
- self._shutdownTLS()
-
-
- def write(self, bytes):
- """
- Process the given application bytes and send any resulting TLS traffic
- which arrives in the send BIO.
-
- If C{loseConnection} was called, subsequent calls to C{write} will
- drop the bytes on the floor.
- """
- # Writes after loseConnection are not supported, unless a producer has
- # been registered, in which case writes can happen until the producer
- # is unregistered:
- if self.disconnecting and self._producer is None:
- return
- self._write(bytes)
-
-
- def _write(self, bytes):
- """
- Process the given application bytes and send any resulting TLS traffic
- which arrives in the send BIO.
-
- This may be called by C{dataReceived} with bytes that were buffered
- before C{loseConnection} was called, which is why this function
- doesn't check for disconnection but accepts the bytes regardless.
- """
- if self._lostTLSConnection:
- return
-
- leftToSend = bytes
- while leftToSend:
- try:
- sent = self._tlsConnection.send(leftToSend)
- except WantReadError:
- self._writeBlockedOnRead = True
- self._appSendBuffer.append(leftToSend)
- if self._producer is not None:
- self._producer.pauseProducing()
- break
- except Error:
- # Pretend TLS connection disconnected, which will trigger
- # disconnect of underlying transport. The error will be passed
- # to the application protocol's connectionLost method. The
- # other SSL implementation doesn't, but losing helpful
- # debugging information is a bad idea.
- self._tlsShutdownFinished(Failure())
- break
- else:
- # If we sent some bytes, the handshake must be done. Keep
- # track of this to control error reporting behavior.
- self._handshakeDone = True
- self._flushSendBIO()
- leftToSend = leftToSend[sent:]
-
-
- def writeSequence(self, iovec):
- """
- Write a sequence of application bytes by joining them into one string
- and passing them to L{write}.
- """
- self.write("".join(iovec))
-
-
- def getPeerCertificate(self):
- return self._tlsConnection.get_peer_certificate()
-
-
- def registerProducer(self, producer, streaming):
- # If we've already disconnected, nothing to do here:
- if self._lostTLSConnection:
- producer.stopProducing()
- return
-
- # If we received a non-streaming producer, wrap it so it becomes a
- # streaming producer:
- if not streaming:
- producer = streamingProducer = _PullToPush(producer, self)
- producer = _ProducerMembrane(producer)
- # This will raise an exception if a producer is already registered:
- self.transport.registerProducer(producer, True)
- self._producer = producer
- # If we received a non-streaming producer, we need to start the
- # streaming wrapper:
- if not streaming:
- streamingProducer.startStreaming()
-
-
- def unregisterProducer(self):
- # If we received a non-streaming producer, we need to stop the
- # streaming wrapper:
- if isinstance(self._producer._producer, _PullToPush):
- self._producer._producer.stopStreaming()
- self._producer = None
- self._producerPaused = False
- self.transport.unregisterProducer()
- if self.disconnecting and not self._writeBlockedOnRead:
- self._shutdownTLS()
-
-
-
-class TLSMemoryBIOFactory(WrappingFactory):
- """
- L{TLSMemoryBIOFactory} adds TLS to connections.
-
- @ivar _contextFactory: The TLS context factory which will be used to define
- certain TLS connection parameters.
-
- @ivar _isClient: A flag which is C{True} if this is a client TLS
- connection, C{False} if it is a server TLS connection.
- """
- protocol = TLSMemoryBIOProtocol
-
- noisy = False # disable unnecessary logging.
-
- def __init__(self, contextFactory, isClient, wrappedFactory):
- WrappingFactory.__init__(self, wrappedFactory)
- self._contextFactory = contextFactory
- self._isClient = isClient
-
- # Force some parameter checking in pyOpenSSL. It's better to fail now
- # than after we've set up the transport.
- contextFactory.getContext()
-
-
- def logPrefix(self):
- """
- Annotate the wrapped factory's log prefix with some text indicating TLS
- is in use.
-
- @rtype: C{str}
- """
- if ILoggingContext.providedBy(self.wrappedFactory):
- logPrefix = self.wrappedFactory.logPrefix()
- else:
- logPrefix = self.wrappedFactory.__class__.__name__
- return "%s (TLS)" % (logPrefix,)
-
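The module docstring above shows server-side use; a client connection is wrapped the same way, with C{isClient} set to C{True}. A minimal sketch, assuming a placeholder C{ApplicationClientProtocol} in the spirit of the docstring's C{someapplication} example::

    from twisted.internet import reactor
    from twisted.internet.protocol import ClientFactory
    from twisted.internet.ssl import CertificateOptions
    from twisted.protocols.tls import TLSMemoryBIOFactory

    from someapplication import ApplicationClientProtocol

    clientFactory = ClientFactory()
    clientFactory.protocol = ApplicationClientProtocol
    # CertificateOptions() with no arguments yields a context that performs
    # no certificate verification; real code would supply CA certificates.
    contextFactory = CertificateOptions()
    tlsFactory = TLSMemoryBIOFactory(contextFactory, True, clientFactory)
    reactor.connectTCP("example.com", 12345, tlsFactory)
    reactor.run()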
diff --git a/lib/python2.7/site-packages/Twisted-12.2.0-py2.7-linux-x86_64.egg/twisted/protocols/wire.py b/lib/python2.7/site-packages/Twisted-12.2.0-py2.7-linux-x86_64.egg/twisted/protocols/wire.py
deleted file mode 100755
index dddf215f..00000000
--- a/lib/python2.7/site-packages/Twisted-12.2.0-py2.7-linux-x86_64.egg/twisted/protocols/wire.py
+++ /dev/null
@@ -1,90 +0,0 @@
-# Copyright (c) Twisted Matrix Laboratories.
-# See LICENSE for details.
-
-"""Implement standard (and unused) TCP protocols.
-
-These protocols are either provided by inetd, or are not provided at all.
-"""
-
-# system imports
-import time, struct
-from zope.interface import implements
-
-# twisted import
-from twisted.internet import protocol, interfaces
-
-
-class Echo(protocol.Protocol):
- """As soon as any data is received, write it back (RFC 862)"""
-
- def dataReceived(self, data):
- self.transport.write(data)
-
-
-class Discard(protocol.Protocol):
- """Discard any received data (RFC 863)"""
-
- def dataReceived(self, data):
- # I'm ignoring you, nyah-nyah
- pass
-
-
-class Chargen(protocol.Protocol):
- """Generate repeating noise (RFC 864)"""
- noise = r'@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\]^_`abcdefghijklmnopqrstuvwxyz{|}~ !"#$%&?'
-
- implements(interfaces.IProducer)
-
- def connectionMade(self):
- self.transport.registerProducer(self, 0)
-
- def resumeProducing(self):
- self.transport.write(self.noise)
-
- def pauseProducing(self):
- pass
-
- def stopProducing(self):
- pass
-
-
-class QOTD(protocol.Protocol):
- """Return a quote of the day (RFC 865)"""
-
- def connectionMade(self):
- self.transport.write(self.getQuote())
- self.transport.loseConnection()
-
- def getQuote(self):
- """Return a quote. May be overrriden in subclasses."""
- return "An apple a day keeps the doctor away.\r\n"
-
-class Who(protocol.Protocol):
- """Return list of active users (RFC 866)"""
-
- def connectionMade(self):
- self.transport.write(self.getUsers())
- self.transport.loseConnection()
-
- def getUsers(self):
- """Return active users. Override in subclasses."""
- return "root\r\n"
-
-
-class Daytime(protocol.Protocol):
- """Send back the daytime in ASCII form (RFC 867)"""
-
- def connectionMade(self):
- self.transport.write(time.asctime(time.gmtime(time.time())) + '\r\n')
- self.transport.loseConnection()
-
-
-class Time(protocol.Protocol):
- """Send back the time in machine readable form (RFC 868)"""
-
- def connectionMade(self):
- # is this correct only for 32-bit machines?
- result = struct.pack("!i", int(time.time()))
- self.transport.write(result)
- self.transport.loseConnection()
-
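Each of these protocols is served in the usual way by assigning the class to a ServerFactory. A minimal sketch, using unprivileged stand-in ports for the ones RFC 862 and RFC 867 assign (7 and 13)::

    from twisted.internet import protocol, reactor
    from twisted.protocols import wire

    echoFactory = protocol.ServerFactory()
    echoFactory.protocol = wire.Echo
    reactor.listenTCP(8007, echoFactory)      # echo (RFC 862)

    daytimeFactory = protocol.ServerFactory()
    daytimeFactory.protocol = wire.Daytime
    reactor.listenTCP(8013, daytimeFactory)   # daytime (RFC 867)

    reactor.run()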