Diffstat (limited to 'module/lib/beaker')
-rw-r--r--  module/lib/beaker/__init__.py           1
-rw-r--r--  module/lib/beaker/cache.py            459
-rw-r--r--  module/lib/beaker/container.py        633
-rw-r--r--  module/lib/beaker/converters.py        26
-rw-r--r--  module/lib/beaker/crypto/__init__.py   40
-rw-r--r--  module/lib/beaker/crypto/jcecrypto.py   30
-rw-r--r--  module/lib/beaker/crypto/pbkdf2.py    342
-rw-r--r--  module/lib/beaker/crypto/pycrypto.py    31
-rw-r--r--  module/lib/beaker/crypto/util.py        30
-rw-r--r--  module/lib/beaker/exceptions.py         24
-rw-r--r--  module/lib/beaker/ext/__init__.py        0
-rw-r--r--  module/lib/beaker/ext/database.py     165
-rw-r--r--  module/lib/beaker/ext/google.py       120
-rw-r--r--  module/lib/beaker/ext/memcached.py      82
-rw-r--r--  module/lib/beaker/ext/sqla.py         133
-rw-r--r--  module/lib/beaker/middleware.py       165
-rw-r--r--  module/lib/beaker/session.py          618
-rw-r--r--  module/lib/beaker/synchronization.py  381
-rw-r--r--  module/lib/beaker/util.py             302
19 files changed, 0 insertions, 3582 deletions
diff --git a/module/lib/beaker/__init__.py b/module/lib/beaker/__init__.py
deleted file mode 100644
index 792d60054..000000000
--- a/module/lib/beaker/__init__.py
+++ /dev/null
@@ -1 +0,0 @@
-#
diff --git a/module/lib/beaker/cache.py b/module/lib/beaker/cache.py
deleted file mode 100644
index 4a96537ff..000000000
--- a/module/lib/beaker/cache.py
+++ /dev/null
@@ -1,459 +0,0 @@
-"""Cache object
-
-The Cache object is used to manage a set of cache files and their
-associated backend. The backends can be rotated on the fly by
-specifying an alternate type when used.
-
-Advanced users can add new backends in beaker.backends
-
-"""
-
-import warnings
-
-import beaker.container as container
-import beaker.util as util
-from beaker.exceptions import BeakerException, InvalidCacheBackendError
-
-import beaker.ext.memcached as memcached
-import beaker.ext.database as database
-import beaker.ext.sqla as sqla
-import beaker.ext.google as google
-
-# Initialize the basic available backends
-clsmap = {
- 'memory':container.MemoryNamespaceManager,
- 'dbm':container.DBMNamespaceManager,
- 'file':container.FileNamespaceManager,
- 'ext:memcached':memcached.MemcachedNamespaceManager,
- 'ext:database':database.DatabaseNamespaceManager,
- 'ext:sqla': sqla.SqlaNamespaceManager,
- 'ext:google': google.GoogleNamespaceManager,
- }
-
-# Initialize the cache region dict
-cache_regions = {}
-cache_managers = {}
-
-try:
- import pkg_resources
-
- # Load up the additional entry point defined backends
- for entry_point in pkg_resources.iter_entry_points('beaker.backends'):
- try:
- NamespaceManager = entry_point.load()
- name = entry_point.name
- if name in clsmap:
- raise BeakerException("NamespaceManager name conflict,'%s' "
- "already loaded" % name)
- clsmap[name] = NamespaceManager
- except (InvalidCacheBackendError, SyntaxError):
- # Ignore invalid backends
- pass
- except:
- import sys
- from pkg_resources import DistributionNotFound
- # Warn when there's a problem loading a NamespaceManager
- if not isinstance(sys.exc_info()[1], DistributionNotFound):
- import traceback
- from StringIO import StringIO
- tb = StringIO()
- traceback.print_exc(file=tb)
- warnings.warn("Unable to load NamespaceManager entry point: '%s': "
- "%s" % (entry_point, tb.getvalue()), RuntimeWarning,
- 2)
-except ImportError:
- pass
-
-
-
-
-def cache_region(region, *deco_args):
- """Decorate a function to cache itself using a cache region
-
- The region decorator requires arguments if there is more than
- one function with the same name in the same module. This is
- because the namespace used for a function's cache is based on
- the function's name and its module.
-
-
- Example::
-
- # Add cache region settings to beaker:
- beaker.cache.cache_regions.update(dict_of_config_region_options)
-
- @cache_region('short_term', 'some_data')
- def populate_things(search_term, limit, offset):
- return load_the_data(search_term, limit, offset)
-
- return populate_things('rabbits', 20, 0)
-
- .. note::
-
- The function being decorated must only be called with
- positional arguments.
-
- """
- cache = [None]
-
- def decorate(func):
- namespace = util.func_namespace(func)
- def cached(*args):
- if region not in cache_regions:
- raise BeakerException('Cache region not configured: %s' % region)
- reg = cache_regions[region]
- if not reg.get('enabled', True):
- return func(*args)
-
- if not cache[0]:
- cache[0] = Cache._get_cache(namespace, reg)
-
- cache_key = " ".join(map(str, deco_args + args))
- def go():
- return func(*args)
-
- return cache[0].get_value(cache_key, createfunc=go)
- cached._arg_namespace = namespace
- cached._arg_region = region
- return cached
- return decorate
-
-
-def region_invalidate(namespace, region, *args):
- """Invalidate a cache region namespace or decorated function
-
- This function only invalidates cache spaces created with the
- cache_region decorator.
-
- :param namespace: Either the namespace of the result to invalidate, or the
- cached function reference
-
- :param region: The region the function was cached to. If the function was
- cached to a single region then this argument can be None
-
- :param args: Arguments that were used to differentiate the cached
- function as well as the arguments passed to the decorated
- function
-
- Example::
-
- # Add cache region settings to beaker:
- beaker.cache.cache_regions.update(dict_of_config_region_options)
-
- def populate_things(invalidate=False):
-
- @cache_region('short_term', 'some_data')
- def load(search_term, limit, offset):
- return load_the_data(search_term, limit, offset)
-
- # If the results should be invalidated first
- if invalidate:
- region_invalidate(load, None, 'some_data',
- 'rabbits', 20, 0)
- return load('rabbits', 20, 0)
-
- """
- if callable(namespace):
- if not region:
- region = namespace._arg_region
- namespace = namespace._arg_namespace
-
- if not region:
- raise BeakerException("Region or callable function "
- "namespace is required")
- else:
- region = cache_regions[region]
-
- cache = Cache._get_cache(namespace, region)
- cache_key = " ".join(str(x) for x in args)
- cache.remove_value(cache_key)
-
-
-class Cache(object):
- """Front-end to the containment API implementing a data cache.
-
- :param namespace: the namespace of this Cache
-
- :param type: type of cache to use
-
- :param expire: seconds to keep cached data
-
- :param expiretime: seconds to keep cached data (legacy support)
-
- :param starttime: time when the cache was created
-
- """
- def __init__(self, namespace, type='memory', expiretime=None,
- starttime=None, expire=None, **nsargs):
- try:
- cls = clsmap[type]
- if isinstance(cls, InvalidCacheBackendError):
- raise cls
- except KeyError:
- raise TypeError("Unknown cache implementation %r" % type)
-
- self.namespace = cls(namespace, **nsargs)
- self.expiretime = expiretime or expire
- self.starttime = starttime
- self.nsargs = nsargs
-
- @classmethod
- def _get_cache(cls, namespace, kw):
- key = namespace + str(kw)
- try:
- return cache_managers[key]
- except KeyError:
- cache_managers[key] = cache = cls(namespace, **kw)
- return cache
-
- def put(self, key, value, **kw):
- self._get_value(key, **kw).set_value(value)
- set_value = put
-
- def get(self, key, **kw):
- """Retrieve a cached value from the container"""
- return self._get_value(key, **kw).get_value()
- get_value = get
-
- def remove_value(self, key, **kw):
- mycontainer = self._get_value(key, **kw)
- if mycontainer.has_current_value():
- mycontainer.clear_value()
- remove = remove_value
-
- def _get_value(self, key, **kw):
- if isinstance(key, unicode):
- key = key.encode('ascii', 'backslashreplace')
-
- if 'type' in kw:
- return self._legacy_get_value(key, **kw)
-
- kw.setdefault('expiretime', self.expiretime)
- kw.setdefault('starttime', self.starttime)
-
- return container.Value(key, self.namespace, **kw)
-
- @util.deprecated("Specifying a "
- "'type' and other namespace configuration with cache.get()/put()/etc. "
- "is deprecated. Specify 'type' and other namespace configuration to "
- "cache_manager.get_cache() and/or the Cache constructor instead.")
- def _legacy_get_value(self, key, type, **kw):
- expiretime = kw.pop('expiretime', self.expiretime)
- starttime = kw.pop('starttime', None)
- createfunc = kw.pop('createfunc', None)
- kwargs = self.nsargs.copy()
- kwargs.update(kw)
- c = Cache(self.namespace.namespace, type=type, **kwargs)
- return c._get_value(key, expiretime=expiretime, createfunc=createfunc,
- starttime=starttime)
-
- def clear(self):
- """Clear all the values from the namespace"""
- self.namespace.remove()
-
- # dict interface
- def __getitem__(self, key):
- return self.get(key)
-
- def __contains__(self, key):
- return self._get_value(key).has_current_value()
-
- def has_key(self, key):
- return key in self
-
- def __delitem__(self, key):
- self.remove_value(key)
-
- def __setitem__(self, key, value):
- self.put(key, value)
-
-
-class CacheManager(object):
- def __init__(self, **kwargs):
- """Initialize a CacheManager object with a set of options
-
- Options should be parsed with the
- :func:`~beaker.util.parse_cache_config_options` function to
- ensure only valid options are used.
-
- """
- self.kwargs = kwargs
- self.regions = kwargs.pop('cache_regions', {})
-
- # Add these regions to the module global
- cache_regions.update(self.regions)
-
- def get_cache(self, name, **kwargs):
- kw = self.kwargs.copy()
- kw.update(kwargs)
- return Cache._get_cache(name, kw)
-
- def get_cache_region(self, name, region):
- if region not in self.regions:
- raise BeakerException('Cache region not configured: %s' % region)
- kw = self.regions[region]
- return Cache._get_cache(name, kw)
-
- def region(self, region, *args):
- """Decorate a function to cache itself using a cache region
-
- The region decorator requires arguments if there is more than
- one function with the same name in the same module. This is
- because the namespace used for a function's cache is based on
- the function's name and its module.
-
-
- Example::
-
- # Assuming a cache object is available like:
- cache = CacheManager(dict_of_config_options)
-
-
- def populate_things():
-
- @cache.region('short_term', 'some_data')
- def load(search_term, limit, offset):
- return load_the_data(search_term, limit, offset)
-
- return load('rabbits', 20, 0)
-
- .. note::
-
- The function being decorated must only be called with
- positional arguments.
-
- """
- return cache_region(region, *args)
-
- def region_invalidate(self, namespace, region, *args):
- """Invalidate a cache region namespace or decorated function
-
- This function only invalidates cache spaces created with the
- cache_region decorator.
-
- :param namespace: Either the namespace of the result to invalidate, or the
- name of the cached function
-
- :param region: The region the function was cached to. If the function was
- cached to a single region then this argument can be None
-
- :param args: Arguments that were used to differentiate the cached
- function as well as the arguments passed to the decorated
- function
-
- Example::
-
- # Assuming a cache object is available like:
- cache = CacheManager(dict_of_config_options)
-
- def populate_things(invalidate=False):
-
- @cache.region('short_term', 'some_data')
- def load(search_term, limit, offset):
- return load_the_data(search_term, limit, offset)
-
- # If the results should be invalidated first
- if invalidate:
- cache.region_invalidate(load, None, 'some_data',
- 'rabbits', 20, 0)
- return load('rabbits', 20, 0)
-
-
- """
- return region_invalidate(namespace, region, *args)
-
- def cache(self, *args, **kwargs):
- """Decorate a function to cache itself with supplied parameters
-
- :param args: Used to make the key unique for this function, as in region()
- above.
-
- :param kwargs: Parameters to be passed to get_cache(), will override defaults
-
- Example::
-
- # Assuming a cache object is available like:
- cache = CacheManager(dict_of_config_options)
-
-
- def populate_things():
-
- @cache.cache('mycache', expire=15)
- def load(search_term, limit, offset):
- return load_the_data(search_term, limit, offset)
-
- return load('rabbits', 20, 0)
-
- .. note::
-
- The function being decorated must only be called with
- positional arguments.
-
- """
- cache = [None]
- key = " ".join(str(x) for x in args)
-
- def decorate(func):
- namespace = util.func_namespace(func)
- def cached(*args):
- if not cache[0]:
- cache[0] = self.get_cache(namespace, **kwargs)
- cache_key = key + " " + " ".join(str(x) for x in args)
- def go():
- return func(*args)
- return cache[0].get_value(cache_key, createfunc=go)
- cached._arg_namespace = namespace
- return cached
- return decorate
-
- def invalidate(self, func, *args, **kwargs):
- """Invalidate a cache decorated function
-
- This function only invalidates cache spaces created with the
- cache decorator.
-
- :param func: Decorated function to invalidate
-
- :param args: Used to make the key unique for this function, as in region()
- above.
-
- :param kwargs: Parameters that were passed to get_cache(); note that
- these are only required if a ``type`` was specified for
- the function
-
- Example::
-
- # Assuming a cache object is available like:
- cache = CacheManager(dict_of_config_options)
-
-
- def populate_things(invalidate=False):
-
- @cache.cache('mycache', type="file", expire=15)
- def load(search_term, limit, offset):
- return load_the_data(search_term, limit, offset)
-
- # If the results should be invalidated first
- if invalidate:
- cache.invalidate(load, 'mycache', 'rabbits', 20, 0, type="file")
- return load('rabbits', 20, 0)
-
- """
- namespace = func._arg_namespace
-
- cache = self.get_cache(namespace, **kwargs)
- cache_key = " ".join(str(x) for x in args)
- cache.remove_value(cache_key)
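
Usage sketch (illustrative, not part of the deleted file): the docstrings
above compose into the following end-to-end flow, assuming this vendored
copy is importable as ``beaker`` under Python 2; the loader body is a
placeholder::

    from beaker.cache import CacheManager
    from beaker.util import parse_cache_config_options

    opts = parse_cache_config_options({'cache.type': 'memory'})
    cache = CacheManager(**opts)

    @cache.cache('some_data', expire=15)
    def load(search_term, limit, offset):
        return [search_term] * limit  # stand-in for a real data load

    load('rabbits', 20, 0)    # computed once and stored
    load('rabbits', 20, 0)    # served from the memory namespace
    cache.invalidate(load, 'some_data', 'rabbits', 20, 0)
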
diff --git a/module/lib/beaker/container.py b/module/lib/beaker/container.py
deleted file mode 100644
index 515e97af6..000000000
--- a/module/lib/beaker/container.py
+++ /dev/null
@@ -1,633 +0,0 @@
-"""Container and Namespace classes"""
-import anydbm
-import cPickle
-import logging
-import os
-import time
-
-import beaker.util as util
-from beaker.exceptions import CreationAbortedError, MissingCacheParameter
-from beaker.synchronization import _threading, file_synchronizer, \
- mutex_synchronizer, NameLock, null_synchronizer
-
-__all__ = ['Value', 'Container', 'ContainerContext',
- 'MemoryContainer', 'DBMContainer', 'NamespaceManager',
- 'MemoryNamespaceManager', 'DBMNamespaceManager', 'FileContainer',
- 'OpenResourceNamespaceManager',
- 'FileNamespaceManager', 'CreationAbortedError']
-
-
-logger = logging.getLogger('beaker.container')
-if logger.isEnabledFor(logging.DEBUG):
- debug = logger.debug
-else:
- def debug(message, *args):
- pass
-
-
-class NamespaceManager(object):
- """Handles dictionary operations and locking for a namespace of
- values.
-
- The implementation for setting and retrieving the namespace data is
- handled by subclasses.
-
- NamespaceManager may be used alone, or may be privately accessed by
- one or more Container objects. Container objects provide per-key
- services like expiration times and automatic recreation of values.
-
- Multiple NamespaceManagers created with a particular name will all
- share access to the same underlying datasource and will attempt to
- synchronize against a common mutex object. The scope of this
- sharing may be within a single process or across multiple
- processes, depending on the type of NamespaceManager used.
-
- The NamespaceManager itself is generally threadsafe, except in the
- case of the DBMNamespaceManager in conjunction with the gdbm dbm
- implementation.
-
- """
-
- @classmethod
- def _init_dependencies(cls):
- pass
-
- def __init__(self, namespace):
- self._init_dependencies()
- self.namespace = namespace
-
- def get_creation_lock(self, key):
- raise NotImplementedError()
-
- def do_remove(self):
- raise NotImplementedError()
-
- def acquire_read_lock(self):
- pass
-
- def release_read_lock(self):
- pass
-
- def acquire_write_lock(self, wait=True):
- return True
-
- def release_write_lock(self):
- pass
-
- def has_key(self, key):
- return self.__contains__(key)
-
- def __getitem__(self, key):
- raise NotImplementedError()
-
- def __setitem__(self, key, value):
- raise NotImplementedError()
-
- def set_value(self, key, value, expiretime=None):
- """Optional set_value() method called by Value.
-
- Allows an expiretime to be passed, for namespace
- implementations which can prune their collections
- using expiretime.
-
- """
- self[key] = value
-
- def __contains__(self, key):
- raise NotImplementedError()
-
- def __delitem__(self, key):
- raise NotImplementedError()
-
- def keys(self):
- raise NotImplementedError()
-
- def remove(self):
- self.do_remove()
-
-
-class OpenResourceNamespaceManager(NamespaceManager):
- """A NamespaceManager where read/write operations require opening/
- closing of a resource which is possibly mutexed.
-
- """
- def __init__(self, namespace):
- NamespaceManager.__init__(self, namespace)
- self.access_lock = self.get_access_lock()
- self.openers = 0
- self.mutex = _threading.Lock()
-
- def get_access_lock(self):
- raise NotImplementedError()
-
- def do_open(self, flags):
- raise NotImplementedError()
-
- def do_close(self):
- raise NotImplementedError()
-
- def acquire_read_lock(self):
- self.access_lock.acquire_read_lock()
- try:
- self.open('r', checkcount = True)
- except:
- self.access_lock.release_read_lock()
- raise
-
- def release_read_lock(self):
- try:
- self.close(checkcount = True)
- finally:
- self.access_lock.release_read_lock()
-
- def acquire_write_lock(self, wait=True):
- r = self.access_lock.acquire_write_lock(wait)
- try:
- if (wait or r):
- self.open('c', checkcount = True)
- return r
- except:
- self.access_lock.release_write_lock()
- raise
-
- def release_write_lock(self):
- try:
- self.close(checkcount=True)
- finally:
- self.access_lock.release_write_lock()
-
- def open(self, flags, checkcount=False):
- self.mutex.acquire()
- try:
- if checkcount:
- if self.openers == 0:
- self.do_open(flags)
- self.openers += 1
- else:
- self.do_open(flags)
- self.openers = 1
- finally:
- self.mutex.release()
-
- def close(self, checkcount=False):
- self.mutex.acquire()
- try:
- if checkcount:
- self.openers -= 1
- if self.openers == 0:
- self.do_close()
- else:
- if self.openers > 0:
- self.do_close()
- self.openers = 0
- finally:
- self.mutex.release()
-
- def remove(self):
- self.access_lock.acquire_write_lock()
- try:
- self.close(checkcount=False)
- self.do_remove()
- finally:
- self.access_lock.release_write_lock()
-
-class Value(object):
- __slots__ = 'key', 'createfunc', 'expiretime', 'expire_argument', 'starttime', 'storedtime',\
- 'namespace'
-
- def __init__(self, key, namespace, createfunc=None, expiretime=None, starttime=None):
- self.key = key
- self.createfunc = createfunc
- self.expire_argument = expiretime
- self.starttime = starttime
- self.storedtime = -1
- self.namespace = namespace
-
- def has_value(self):
- """return true if the container has a value stored.
-
- This is regardless of it being expired or not.
-
- """
- self.namespace.acquire_read_lock()
- try:
- return self.namespace.has_key(self.key)
- finally:
- self.namespace.release_read_lock()
-
- def can_have_value(self):
- return self.has_current_value() or self.createfunc is not None
-
- def has_current_value(self):
- self.namespace.acquire_read_lock()
- try:
- has_value = self.namespace.has_key(self.key)
- if has_value:
- try:
- stored, expired, value = self._get_value()
- return not self._is_expired(stored, expired)
- except KeyError:
- pass
- return False
- finally:
- self.namespace.release_read_lock()
-
- def _is_expired(self, storedtime, expiretime):
- """Return true if this container's value is expired."""
- return (
- (
- self.starttime is not None and
- storedtime < self.starttime
- )
- or
- (
- expiretime is not None and
- time.time() >= expiretime + storedtime
- )
- )
-
- def get_value(self):
- self.namespace.acquire_read_lock()
- try:
- has_value = self.has_value()
- if has_value:
- try:
- stored, expired, value = self._get_value()
- if not self._is_expired(stored, expired):
- return value
- except KeyError:
- # guard against un-mutexed backends raising KeyError
- has_value = False
-
- if not self.createfunc:
- raise KeyError(self.key)
- finally:
- self.namespace.release_read_lock()
-
- has_createlock = False
- creation_lock = self.namespace.get_creation_lock(self.key)
- if has_value:
- if not creation_lock.acquire(wait=False):
- debug("get_value returning old value while new one is created")
- return value
- else:
- debug("lock_creatfunc (didnt wait)")
- has_createlock = True
-
- if not has_createlock:
- debug("lock_createfunc (waiting)")
- creation_lock.acquire()
- debug("lock_createfunc (waited)")
-
- try:
- # see if someone created the value already
- self.namespace.acquire_read_lock()
- try:
- if self.has_value():
- try:
- stored, expired, value = self._get_value()
- if not self._is_expired(stored, expired):
- return value
- except KeyError:
- # guard against un-mutexed backends raising KeyError
- pass
- finally:
- self.namespace.release_read_lock()
-
- debug("get_value creating new value")
- v = self.createfunc()
- self.set_value(v)
- return v
- finally:
- creation_lock.release()
- debug("released create lock")
-
- def _get_value(self):
- value = self.namespace[self.key]
- try:
- stored, expired, value = value
- except ValueError:
- if not len(value) == 2:
- raise
- # Old format: upgrade
- stored, value = value
- expired = self.expire_argument
- debug("get_value upgrading time %r expire time %r", stored, self.expire_argument)
- self.namespace.release_read_lock()
- self.set_value(value, stored)
- self.namespace.acquire_read_lock()
- except TypeError:
- # occurs when the value is None. memcached
- # may yank the rug from under us in which case
- # that's the result
- raise KeyError(self.key)
- return stored, expired, value
-
- def set_value(self, value, storedtime=None):
- self.namespace.acquire_write_lock()
- try:
- if storedtime is None:
- storedtime = time.time()
- debug("set_value stored time %r expire time %r", storedtime, self.expire_argument)
- self.namespace.set_value(self.key, (storedtime, self.expire_argument, value))
- finally:
- self.namespace.release_write_lock()
-
- def clear_value(self):
- self.namespace.acquire_write_lock()
- try:
- debug("clear_value")
- if self.namespace.has_key(self.key):
- try:
- del self.namespace[self.key]
- except KeyError:
- # guard against un-mutexed backends raising KeyError
- pass
- self.storedtime = -1
- finally:
- self.namespace.release_write_lock()
-
-class AbstractDictionaryNSManager(NamespaceManager):
- """A subclassable NamespaceManager that places data in a dictionary.
-
- Subclasses should provide a "dictionary" attribute or descriptor
- which returns a dict-like object. The dictionary will store keys
- that are local to the "namespace" attribute of this manager, so
- ensure that the dictionary will not be used by any other namespace.
-
- e.g.::
-
- import collections
- cached_data = collections.defaultdict(dict)
-
- class MyDictionaryManager(AbstractDictionaryNSManager):
- def __init__(self, namespace):
- AbstractDictionaryNSManager.__init__(self, namespace)
- self.dictionary = cached_data[self.namespace]
-
- The above stores data in a global dictionary called "cached_data",
- which is structured as a dictionary of dictionaries, keyed
- first on namespace name to a sub-dictionary, then on actual
- cache key to value.
-
- """
-
- def get_creation_lock(self, key):
- return NameLock(
- identifier="memorynamespace/funclock/%s/%s" % (self.namespace, key),
- reentrant=True
- )
-
- def __getitem__(self, key):
- return self.dictionary[key]
-
- def __contains__(self, key):
- return self.dictionary.__contains__(key)
-
- def has_key(self, key):
- return self.dictionary.__contains__(key)
-
- def __setitem__(self, key, value):
- self.dictionary[key] = value
-
- def __delitem__(self, key):
- del self.dictionary[key]
-
- def do_remove(self):
- self.dictionary.clear()
-
- def keys(self):
- return self.dictionary.keys()
-
-class MemoryNamespaceManager(AbstractDictionaryNSManager):
- namespaces = util.SyncDict()
-
- def __init__(self, namespace, **kwargs):
- AbstractDictionaryNSManager.__init__(self, namespace)
- self.dictionary = MemoryNamespaceManager.namespaces.get(self.namespace,
- dict)
-
-class DBMNamespaceManager(OpenResourceNamespaceManager):
- def __init__(self, namespace, dbmmodule=None, data_dir=None,
- dbm_dir=None, lock_dir=None, digest_filenames=True, **kwargs):
- self.digest_filenames = digest_filenames
-
- if not dbm_dir and not data_dir:
- raise MissingCacheParameter("data_dir or dbm_dir is required")
- elif dbm_dir:
- self.dbm_dir = dbm_dir
- else:
- self.dbm_dir = data_dir + "/container_dbm"
- util.verify_directory(self.dbm_dir)
-
- if not lock_dir and not data_dir:
- raise MissingCacheParameter("data_dir or lock_dir is required")
- elif lock_dir:
- self.lock_dir = lock_dir
- else:
- self.lock_dir = data_dir + "/container_dbm_lock"
- util.verify_directory(self.lock_dir)
-
- self.dbmmodule = dbmmodule or anydbm
-
- self.dbm = None
- OpenResourceNamespaceManager.__init__(self, namespace)
-
- self.file = util.encoded_path(root= self.dbm_dir,
- identifiers=[self.namespace],
- extension='.dbm',
- digest_filenames=self.digest_filenames)
-
- debug("data file %s", self.file)
- self._checkfile()
-
- def get_access_lock(self):
- return file_synchronizer(identifier=self.namespace,
- lock_dir=self.lock_dir)
-
- def get_creation_lock(self, key):
- return file_synchronizer(
- identifier = "dbmcontainer/funclock/%s" % self.namespace,
- lock_dir=self.lock_dir
- )
-
- def file_exists(self, file):
- if os.access(file, os.F_OK):
- return True
- else:
- for ext in ('db', 'dat', 'pag', 'dir'):
- if os.access(file + os.extsep + ext, os.F_OK):
- return True
-
- return False
-
- def _checkfile(self):
- if not self.file_exists(self.file):
- g = self.dbmmodule.open(self.file, 'c')
- g.close()
-
- def get_filenames(self):
- list = []
- if os.access(self.file, os.F_OK):
- list.append(self.file)
-
- for ext in ('pag', 'dir', 'db', 'dat'):
- if os.access(self.file + os.extsep + ext, os.F_OK):
- list.append(self.file + os.extsep + ext)
- return list
-
- def do_open(self, flags):
- debug("opening dbm file %s", self.file)
- try:
- self.dbm = self.dbmmodule.open(self.file, flags)
- except:
- self._checkfile()
- self.dbm = self.dbmmodule.open(self.file, flags)
-
- def do_close(self):
- if self.dbm is not None:
- debug("closing dbm file %s", self.file)
- self.dbm.close()
-
- def do_remove(self):
- for f in self.get_filenames():
- os.remove(f)
-
- def __getitem__(self, key):
- return cPickle.loads(self.dbm[key])
-
- def __contains__(self, key):
- return self.dbm.has_key(key)
-
- def __setitem__(self, key, value):
- self.dbm[key] = cPickle.dumps(value)
-
- def __delitem__(self, key):
- del self.dbm[key]
-
- def keys(self):
- return self.dbm.keys()
-
-
-class FileNamespaceManager(OpenResourceNamespaceManager):
- def __init__(self, namespace, data_dir=None, file_dir=None, lock_dir=None,
- digest_filenames=True, **kwargs):
- self.digest_filenames = digest_filenames
-
- if not file_dir and not data_dir:
- raise MissingCacheParameter("data_dir or file_dir is required")
- elif file_dir:
- self.file_dir = file_dir
- else:
- self.file_dir = data_dir + "/container_file"
- util.verify_directory(self.file_dir)
-
- if not lock_dir and not data_dir:
- raise MissingCacheParameter("data_dir or lock_dir is required")
- elif lock_dir:
- self.lock_dir = lock_dir
- else:
- self.lock_dir = data_dir + "/container_file_lock"
- util.verify_directory(self.lock_dir)
- OpenResourceNamespaceManager.__init__(self, namespace)
-
- self.file = util.encoded_path(root=self.file_dir,
- identifiers=[self.namespace],
- extension='.cache',
- digest_filenames=self.digest_filenames)
- self.hash = {}
-
- debug("data file %s", self.file)
-
- def get_access_lock(self):
- return file_synchronizer(identifier=self.namespace,
- lock_dir=self.lock_dir)
-
- def get_creation_lock(self, key):
- return file_synchronizer(
- identifier = "filecontainer/funclock/%s" % self.namespace,
- lock_dir = self.lock_dir
- )
-
- def file_exists(self, file):
- return os.access(file, os.F_OK)
-
- def do_open(self, flags):
- if self.file_exists(self.file):
- fh = open(self.file, 'rb')
- try:
- self.hash = cPickle.load(fh)
- except (IOError, OSError, EOFError, cPickle.PickleError, ValueError):
- pass
- fh.close()
-
- self.flags = flags
-
- def do_close(self):
- if self.flags == 'c' or self.flags == 'w':
- fh = open(self.file, 'wb')
- cPickle.dump(self.hash, fh)
- fh.close()
-
- self.hash = {}
- self.flags = None
-
- def do_remove(self):
- try:
- os.remove(self.file)
- except OSError, err:
- # for instance, because we haven't yet used this cache,
- # but client code has asked for a clear() operation...
- pass
- self.hash = {}
-
- def __getitem__(self, key):
- return self.hash[key]
-
- def __contains__(self, key):
- return self.hash.has_key(key)
-
- def __setitem__(self, key, value):
- self.hash[key] = value
-
- def __delitem__(self, key):
- del self.hash[key]
-
- def keys(self):
- return self.hash.keys()
-
-
-#### legacy stuff to support the old "Container" class interface
-
-namespace_classes = {}
-
-ContainerContext = dict
-
-class ContainerMeta(type):
- def __init__(cls, classname, bases, dict_):
- namespace_classes[cls] = cls.namespace_class
- return type.__init__(cls, classname, bases, dict_)
- def __call__(self, key, context, namespace, createfunc=None,
- expiretime=None, starttime=None, **kwargs):
- if namespace in context:
- ns = context[namespace]
- else:
- nscls = namespace_classes[self]
- context[namespace] = ns = nscls(namespace, **kwargs)
- return Value(key, ns, createfunc=createfunc,
- expiretime=expiretime, starttime=starttime)
-
-class Container(object):
- __metaclass__ = ContainerMeta
- namespace_class = NamespaceManager
-
-class FileContainer(Container):
- namespace_class = FileNamespaceManager
-
-class MemoryContainer(Container):
- namespace_class = MemoryNamespaceManager
-
-class DBMContainer(Container):
- namespace_class = DBMNamespaceManager
-
-DbmContainer = DBMContainer
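
The legacy Container shims above sit atop the same Value/NamespaceManager
pairing, which can also be driven directly; a minimal sketch, with
illustrative names for everything other than the imported classes::

    from beaker.container import MemoryNamespaceManager, Value

    ns = MemoryNamespaceManager('demo_namespace')

    def compute():
        return 42  # stand-in for an expensive computation

    v = Value('answer', ns, createfunc=compute, expiretime=300)
    print v.get_value()          # calls compute() once, then caches the result
    print v.has_current_value()  # True until the 300-second window elapses
    v.clear_value()
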
diff --git a/module/lib/beaker/converters.py b/module/lib/beaker/converters.py
deleted file mode 100644
index f0ad34963..000000000
--- a/module/lib/beaker/converters.py
+++ /dev/null
@@ -1,26 +0,0 @@
-# (c) 2005 Ian Bicking and contributors; written for Paste (http://pythonpaste.org)
-# Licensed under the MIT license: http://www.opensource.org/licenses/mit-license.php
-def asbool(obj):
- if isinstance(obj, (str, unicode)):
- obj = obj.strip().lower()
- if obj in ['true', 'yes', 'on', 'y', 't', '1']:
- return True
- elif obj in ['false', 'no', 'off', 'n', 'f', '0']:
- return False
- else:
- raise ValueError(
- "String is not true/false: %r" % obj)
- return bool(obj)
-
-def aslist(obj, sep=None, strip=True):
- if isinstance(obj, (str, unicode)):
- lst = obj.split(sep)
- if strip:
- lst = [v.strip() for v in lst]
- return lst
- elif isinstance(obj, (list, tuple)):
- return obj
- elif obj is None:
- return []
- else:
- return [obj]
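
As the branches above imply, ``asbool()`` accepts the usual truthy/falsey
strings and booleans, while ``aslist()`` normalizes strings, sequences,
and ``None``; for example::

    from beaker.converters import asbool, aslist

    print asbool('Yes')           # True ('yes' after strip().lower())
    print asbool('off')           # False
    print aslist('a, b, c', ',')  # ['a', 'b', 'c'], split and stripped
    print aslist(None)            # []
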
diff --git a/module/lib/beaker/crypto/__init__.py b/module/lib/beaker/crypto/__init__.py
deleted file mode 100644
index 3e26b0c13..000000000
--- a/module/lib/beaker/crypto/__init__.py
+++ /dev/null
@@ -1,40 +0,0 @@
-from warnings import warn
-
-from beaker.crypto.pbkdf2 import PBKDF2, strxor
-from beaker.crypto.util import hmac, sha1, hmac_sha1, md5
-from beaker import util
-
-keyLength = None
-
-if util.jython:
- try:
- from beaker.crypto.jcecrypto import getKeyLength, aesEncrypt
- keyLength = getKeyLength()
- except ImportError:
- pass
-else:
- try:
- from beaker.crypto.pycrypto import getKeyLength, aesEncrypt, aesDecrypt
- keyLength = getKeyLength()
- except ImportError:
- pass
-
-if not keyLength:
- has_aes = False
-else:
- has_aes = True
-
-if has_aes and keyLength < 32:
- warn('Crypto implementation only supports key lengths up to %d bits. '
- 'Generated session cookies may be incompatible with other '
- 'environments' % (keyLength * 8))
-
-
-def generateCryptoKeys(master_key, salt, iterations):
- # Derive the cipher key from the master key and salt via PBKDF2. No
- # os.urandom() material is mixed in, so the result is deterministic
- # for a given (master_key, salt, iterations) triple.
- keystream = PBKDF2(master_key, salt, iterations=iterations)
- cipher_key = keystream.read(keyLength)
- return cipher_key
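
When one of the AES backends above imports cleanly (``has_aes`` is true),
the derived key can drive the encrypt/decrypt pair; a sketch assuming the
CPython path with pycryptopp or PyCrypto installed, and an illustrative
passphrase::

    import os
    from beaker import crypto

    if crypto.has_aes:
        salt = os.urandom(8)
        key = crypto.generateCryptoKeys('master secret', salt, 1000)
        ciphertext = crypto.aesEncrypt('attack at dawn', key)
        assert crypto.aesDecrypt(ciphertext, key) == 'attack at dawn'
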
diff --git a/module/lib/beaker/crypto/jcecrypto.py b/module/lib/beaker/crypto/jcecrypto.py
deleted file mode 100644
index 4062d513e..000000000
--- a/module/lib/beaker/crypto/jcecrypto.py
+++ /dev/null
@@ -1,30 +0,0 @@
-"""
-Encryption module that uses the Java Cryptography Extensions (JCE).
-
-Note that in default installations of the Java Runtime Environment, the
-maximum key length is limited to 128 bits due to US export
-restrictions. This makes the generated keys incompatible with the ones
-generated by pycryptopp, which has no such restrictions. To fix this,
-download the "Unlimited Strength Jurisdiction Policy Files" from Sun,
-which will allow encryption using 256 bit AES keys.
-"""
-from javax.crypto import Cipher
-from javax.crypto.spec import SecretKeySpec, IvParameterSpec
-
-import jarray
-
-# Initialization vector filled with zeros
-_iv = IvParameterSpec(jarray.zeros(16, 'b'))
-
-def aesEncrypt(data, key):
- cipher = Cipher.getInstance('AES/CTR/NoPadding')
- skeySpec = SecretKeySpec(key, 'AES')
- cipher.init(Cipher.ENCRYPT_MODE, skeySpec, _iv)
- return cipher.doFinal(data).tostring()
-
-# In CTR mode decryption applies the same keystream XOR as encryption.
-aesDecrypt = aesEncrypt
-
-def getKeyLength():
- maxlen = Cipher.getMaxAllowedKeyLength('AES/CTR/NoPadding')
- return min(maxlen, 256) / 8
diff --git a/module/lib/beaker/crypto/pbkdf2.py b/module/lib/beaker/crypto/pbkdf2.py
deleted file mode 100644
index 96dc5fbb2..000000000
--- a/module/lib/beaker/crypto/pbkdf2.py
+++ /dev/null
@@ -1,342 +0,0 @@
-#!/usr/bin/python
-# -*- coding: ascii -*-
-###########################################################################
-# PBKDF2.py - PKCS#5 v2.0 Password-Based Key Derivation
-#
-# Copyright (C) 2007 Dwayne C. Litzenberger <dlitz@dlitz.net>
-# All rights reserved.
-#
-# Permission to use, copy, modify, and distribute this software and its
-# documentation for any purpose and without fee is hereby granted,
-# provided that the above copyright notice appear in all copies and that
-# both that copyright notice and this permission notice appear in
-# supporting documentation.
-#
-# THE AUTHOR PROVIDES THIS SOFTWARE ``AS IS'' AND ANY EXPRESSED OR
-# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-# IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-#
-# Country of origin: Canada
-#
-###########################################################################
-# Sample PBKDF2 usage:
-# from Crypto.Cipher import AES
-# from PBKDF2 import PBKDF2
-# import os
-#
-# salt = os.urandom(8) # 64-bit salt
-# key = PBKDF2("This passphrase is a secret.", salt).read(32) # 256-bit key
-# iv = os.urandom(16) # 128-bit IV
-# cipher = AES.new(key, AES.MODE_CBC, iv)
-# ...
-#
-# Sample crypt() usage:
-# from PBKDF2 import crypt
-# pwhash = crypt("secret")
-# alleged_pw = raw_input("Enter password: ")
-# if pwhash == crypt(alleged_pw, pwhash):
-# print "Password good"
-# else:
-# print "Invalid password"
-#
-###########################################################################
-# History:
-#
-# 2007-07-27 Dwayne C. Litzenberger <dlitz@dlitz.net>
-# - Initial Release (v1.0)
-#
-# 2007-07-31 Dwayne C. Litzenberger <dlitz@dlitz.net>
-# - Bugfix release (v1.1)
-# - SECURITY: The PyCrypto XOR cipher (used, if available, in the _strxor
-# function in the previous release) silently truncates all keys to 64
-# bytes. The way it was used in the previous release, this would only be a
-# problem if the pseudorandom function returned values larger than
-# 64 bytes (so SHA1, SHA256 and SHA512 are fine), but I don't like
-# anything that silently reduces the security margin from what is
-# expected.
-#
-###########################################################################
-
-__version__ = "1.1"
-
-from struct import pack
-from binascii import b2a_hex
-from random import randint
-
-from base64 import b64encode
-
-from beaker.crypto.util import hmac as HMAC, hmac_sha1 as SHA1
-
-def strxor(a, b):
- return "".join([chr(ord(x) ^ ord(y)) for (x, y) in zip(a, b)])
-
-class PBKDF2(object):
- """PBKDF2.py : PKCS#5 v2.0 Password-Based Key Derivation
-
- This implementation takes a passphrase and a salt (and optionally an
- iteration count, a digest module, and a MAC module) and provides a
- file-like object from which an arbitrarily-sized key can be read.
-
- If the passphrase and/or salt are unicode objects, they are encoded as
- UTF-8 before they are processed.
-
- The idea behind PBKDF2 is to derive a cryptographic key from a
- passphrase and a salt.
-
- PBKDF2 may also be used as a strong salted password hash. The
- 'crypt' function is provided for that purpose.
-
- Remember: Keys generated using PBKDF2 are only as strong as the
- passphrases they are derived from.
- """
-
- def __init__(self, passphrase, salt, iterations=1000,
- digestmodule=SHA1, macmodule=HMAC):
- if not callable(macmodule):
- macmodule = macmodule.new
- self.__macmodule = macmodule
- self.__digestmodule = digestmodule
- self._setup(passphrase, salt, iterations, self._pseudorandom)
-
- def _pseudorandom(self, key, msg):
- """Pseudorandom function. e.g. HMAC-SHA1"""
- return self.__macmodule(key=key, msg=msg,
- digestmod=self.__digestmodule).digest()
-
- def read(self, bytes):
- """Read the specified number of key bytes."""
- if self.closed:
- raise ValueError("file-like object is closed")
-
- size = len(self.__buf)
- blocks = [self.__buf]
- i = self.__blockNum
- while size < bytes:
- i += 1
- if i > 0xffffffff:
- # We could return "" here, but raising makes the 2**32-block limit explicit
- raise OverflowError("derived key too long")
- block = self.__f(i)
- blocks.append(block)
- size += len(block)
- buf = "".join(blocks)
- retval = buf[:bytes]
- self.__buf = buf[bytes:]
- self.__blockNum = i
- return retval
-
- def __f(self, i):
- # i must fit within 32 bits
- assert (1 <= i <= 0xffffffff)
- U = self.__prf(self.__passphrase, self.__salt + pack("!L", i))
- result = U
- for j in xrange(2, 1+self.__iterations):
- U = self.__prf(self.__passphrase, U)
- result = strxor(result, U)
- return result
-
- def hexread(self, octets):
- """Read the specified number of octets. Return them as hexadecimal.
-
- Note that len(obj.hexread(n)) == 2*n.
- """
- return b2a_hex(self.read(octets))
-
- def _setup(self, passphrase, salt, iterations, prf):
- # Sanity checks:
-
- # passphrase and salt must be str or unicode (in the latter
- # case, we convert to UTF-8)
- if isinstance(passphrase, unicode):
- passphrase = passphrase.encode("UTF-8")
- if not isinstance(passphrase, str):
- raise TypeError("passphrase must be str or unicode")
- if isinstance(salt, unicode):
- salt = salt.encode("UTF-8")
- if not isinstance(salt, str):
- raise TypeError("salt must be str or unicode")
-
- # iterations must be an integer >= 1
- if not isinstance(iterations, (int, long)):
- raise TypeError("iterations must be an integer")
- if iterations < 1:
- raise ValueError("iterations must be at least 1")
-
- # prf must be callable
- if not callable(prf):
- raise TypeError("prf must be callable")
-
- self.__passphrase = passphrase
- self.__salt = salt
- self.__iterations = iterations
- self.__prf = prf
- self.__blockNum = 0
- self.__buf = ""
- self.closed = False
-
- def close(self):
- """Close the stream."""
- if not self.closed:
- del self.__passphrase
- del self.__salt
- del self.__iterations
- del self.__prf
- del self.__blockNum
- del self.__buf
- self.closed = True
-
-def crypt(word, salt=None, iterations=None):
- """PBKDF2-based unix crypt(3) replacement.
-
- The number of iterations specified in the salt overrides the 'iterations'
- parameter.
-
- The effective hash length is 192 bits.
- """
-
- # Generate a (pseudo-)random salt if the user hasn't provided one.
- if salt is None:
- salt = _makesalt()
-
- # salt must be a string or the us-ascii subset of unicode
- if isinstance(salt, unicode):
- salt = salt.encode("us-ascii")
- if not isinstance(salt, str):
- raise TypeError("salt must be a string")
-
- # word must be a string or unicode (in the latter case, we convert to UTF-8)
- if isinstance(word, unicode):
- word = word.encode("UTF-8")
- if not isinstance(word, str):
- raise TypeError("word must be a string or unicode")
-
- # Try to extract the real salt and iteration count from the salt
- if salt.startswith("$p5k2$"):
- (iterations, salt, dummy) = salt.split("$")[2:5]
- if iterations == "":
- iterations = 400
- else:
- converted = int(iterations, 16)
- if iterations != "%x" % converted: # lowercase hex, minimum digits
- raise ValueError("Invalid salt")
- iterations = converted
- if not (iterations >= 1):
- raise ValueError("Invalid salt")
-
- # Make sure the salt matches the allowed character set
- allowed = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789./"
- for ch in salt:
- if ch not in allowed:
- raise ValueError("Illegal character %r in salt" % (ch,))
-
- if iterations is None or iterations == 400:
- iterations = 400
- salt = "$p5k2$$" + salt
- else:
- salt = "$p5k2$%x$%s" % (iterations, salt)
- rawhash = PBKDF2(word, salt, iterations).read(24)
- return salt + "$" + b64encode(rawhash, "./")
-
-# Add crypt as a static method of the PBKDF2 class
-# This makes it easier to do "from PBKDF2 import PBKDF2" and still use
-# crypt.
-PBKDF2.crypt = staticmethod(crypt)
-
-def _makesalt():
- """Return a 48-bit pseudorandom salt for crypt().
-
- This function is not suitable for generating cryptographic secrets.
- """
- binarysalt = "".join([pack("@H", randint(0, 0xffff)) for i in range(3)])
- return b64encode(binarysalt, "./")
-
-def test_pbkdf2():
- """Module self-test"""
- from binascii import a2b_hex
-
- #
- # Test vectors from RFC 3962
- #
-
- # Test 1
- result = PBKDF2("password", "ATHENA.MIT.EDUraeburn", 1).read(16)
- expected = a2b_hex("cdedb5281bb2f801565a1122b2563515")
- if result != expected:
- raise RuntimeError("self-test failed")
-
- # Test 2
- result = PBKDF2("password", "ATHENA.MIT.EDUraeburn", 1200).hexread(32)
- expected = ("5c08eb61fdf71e4e4ec3cf6ba1f5512b"
- "a7e52ddbc5e5142f708a31e2e62b1e13")
- if result != expected:
- raise RuntimeError("self-test failed")
-
- # Test 3
- result = PBKDF2("X"*64, "pass phrase equals block size", 1200).hexread(32)
- expected = ("139c30c0966bc32ba55fdbf212530ac9"
- "c5ec59f1a452f5cc9ad940fea0598ed1")
- if result != expected:
- raise RuntimeError("self-test failed")
-
- # Test 4
- result = PBKDF2("X"*65, "pass phrase exceeds block size", 1200).hexread(32)
- expected = ("9ccad6d468770cd51b10e6a68721be61"
- "1a8b4d282601db3b36be9246915ec82a")
- if result != expected:
- raise RuntimeError("self-test failed")
-
- #
- # Other test vectors
- #
-
- # Chunked read
- f = PBKDF2("kickstart", "workbench", 256)
- result = f.read(17)
- result += f.read(17)
- result += f.read(1)
- result += f.read(2)
- result += f.read(3)
- expected = PBKDF2("kickstart", "workbench", 256).read(40)
- if result != expected:
- raise RuntimeError("self-test failed")
-
- #
- # crypt() test vectors
- #
-
- # crypt 1
- result = crypt("cloadm", "exec")
- expected = '$p5k2$$exec$r1EWMCMk7Rlv3L/RNcFXviDefYa0hlql'
- if result != expected:
- raise RuntimeError("self-test failed")
-
- # crypt 2
- result = crypt("gnu", '$p5k2$c$u9HvcT4d$.....')
- expected = '$p5k2$c$u9HvcT4d$Sd1gwSVCLZYAuqZ25piRnbBEoAesaa/g'
- if result != expected:
- raise RuntimeError("self-test failed")
-
- # crypt 3
- result = crypt("dcl", "tUsch7fU", iterations=13)
- expected = "$p5k2$d$tUsch7fU$nqDkaxMDOFBeJsTSfABsyn.PYUXilHwL"
- if result != expected:
- raise RuntimeError("self-test failed")
-
- # crypt 4 (unicode)
- result = crypt(u'\u0399\u03c9\u03b1\u03bd\u03bd\u03b7\u03c2',
- '$p5k2$$KosHgqNo$9mjN8gqjt02hDoP0c2J0ABtLIwtot8cQ')
- expected = '$p5k2$$KosHgqNo$9mjN8gqjt02hDoP0c2J0ABtLIwtot8cQ'
- if result != expected:
- raise RuntimeError("self-test failed")
-
-if __name__ == '__main__':
- test_pbkdf2()
-
-# vim:set ts=4 sw=4 sts=4 expandtab:
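
The sample usage in the header comments of this file runs as-is; condensed
here as a sketch::

    import os
    from beaker.crypto.pbkdf2 import PBKDF2, crypt

    salt = os.urandom(8)                                         # 64-bit salt
    key = PBKDF2("This passphrase is a secret.", salt).read(32)  # 256-bit key

    pwhash = crypt("secret")
    if pwhash == crypt("secret", pwhash):  # the stored hash doubles as salt
        print "Password good"
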
diff --git a/module/lib/beaker/crypto/pycrypto.py b/module/lib/beaker/crypto/pycrypto.py
deleted file mode 100644
index a3eb4d9db..000000000
--- a/module/lib/beaker/crypto/pycrypto.py
+++ /dev/null
@@ -1,31 +0,0 @@
-"""Encryption module that uses pycryptopp or pycrypto"""
-try:
- # Pycryptopp is preferred over Crypto because Crypto has had
- # various periods of not being maintained, and pycryptopp uses
- # the Crypto++ library which is generally considered the 'gold standard'
- # of crypto implementations
- from pycryptopp.cipher import aes
-
- def aesEncrypt(data, key):
- cipher = aes.AES(key)
- return cipher.process(data)
-
- # In CTR mode decryption applies the same keystream XOR as encryption.
- aesDecrypt = aesEncrypt
-
-except ImportError:
- from Crypto.Cipher import AES
-
- def aesEncrypt(data, key):
- cipher = AES.new(key)
-
- data = data + (" " * (16 - (len(data) % 16)))
- return cipher.encrypt(data)
-
- def aesDecrypt(data, key):
- cipher = AES.new(key)
-
- return cipher.decrypt(data).rstrip()
-
-def getKeyLength():
- return 32
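
Note one behavioral difference between the two branches above: the
PyCrypto fallback pads with spaces and strips them on decryption, so
trailing whitespace does not survive a round trip. A sketch assuming the
PyCrypto branch was taken, with an illustrative key::

    from beaker.crypto.pycrypto import aesEncrypt, aesDecrypt, getKeyLength

    key = 'k' * getKeyLength()       # placeholder; use real key material
    ct = aesEncrypt('trailing  ', key)
    print repr(aesDecrypt(ct, key))  # prints 'trailing'; the spaces are gone
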
diff --git a/module/lib/beaker/crypto/util.py b/module/lib/beaker/crypto/util.py
deleted file mode 100644
index d97e8ce6f..000000000
--- a/module/lib/beaker/crypto/util.py
+++ /dev/null
@@ -1,30 +0,0 @@
-from warnings import warn
-from beaker import util
-
-
-try:
- # Use PyCrypto (if available)
- from Crypto.Hash import HMAC as hmac, SHA as hmac_sha1
- sha1 = hmac_sha1.new
-
-except ImportError:
-
- # PyCrypto not available. Use the Python standard library.
- import hmac
-
- # When using the stdlib, we have to make sure the hmac version and sha
- # version are compatible
- if util.py24:
- from sha import sha as sha1
- import sha as hmac_sha1
- else:
- # NOTE: We have to use the callable with hashlib (hashlib.sha1),
- # otherwise hmac only accepts the sha module object itself
- from hashlib import sha1
- hmac_sha1 = sha1
-
-
-if util.py24:
- from md5 import md5
-else:
- from hashlib import md5
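
These aliases let callers build HMAC-SHA1 the same way on every supported
interpreter, which is exactly how pbkdf2.py uses them; for example::

    from beaker.crypto.util import hmac, hmac_sha1, sha1, md5

    mac = hmac.new('key', 'message', digestmod=hmac_sha1).digest()
    print len(mac)                   # 20, the SHA-1 digest size
    print sha1('data').hexdigest()
    print md5('data').hexdigest()
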
diff --git a/module/lib/beaker/exceptions.py b/module/lib/beaker/exceptions.py
deleted file mode 100644
index cc0eed286..000000000
--- a/module/lib/beaker/exceptions.py
+++ /dev/null
@@ -1,24 +0,0 @@
-"""Beaker exception classes"""
-
-class BeakerException(Exception):
- pass
-
-
-class CreationAbortedError(Exception):
- """Deprecated."""
-
-
-class InvalidCacheBackendError(BeakerException, ImportError):
- pass
-
-
-class MissingCacheParameter(BeakerException):
- pass
-
-
-class LockError(BeakerException):
- pass
-
-
-class InvalidCryptoBackendError(BeakerException):
- pass
diff --git a/module/lib/beaker/ext/__init__.py b/module/lib/beaker/ext/__init__.py
deleted file mode 100644
index e69de29bb..000000000
--- a/module/lib/beaker/ext/__init__.py
+++ /dev/null
diff --git a/module/lib/beaker/ext/database.py b/module/lib/beaker/ext/database.py
deleted file mode 100644
index 701e6f7d2..000000000
--- a/module/lib/beaker/ext/database.py
+++ /dev/null
@@ -1,165 +0,0 @@
-import cPickle
-import logging
-import pickle
-from datetime import datetime
-
-from beaker.container import OpenResourceNamespaceManager, Container
-from beaker.exceptions import InvalidCacheBackendError, MissingCacheParameter
-from beaker.synchronization import file_synchronizer, null_synchronizer
-from beaker.util import verify_directory, SyncDict
-
-log = logging.getLogger(__name__)
-
-sa = None
-pool = None
-types = None
-
-class DatabaseNamespaceManager(OpenResourceNamespaceManager):
- metadatas = SyncDict()
- tables = SyncDict()
-
- @classmethod
- def _init_dependencies(cls):
- global sa, pool, types
- if sa is not None:
- return
- try:
- import sqlalchemy as sa
- import sqlalchemy.pool as pool
- from sqlalchemy import types
- except ImportError:
- raise InvalidCacheBackendError("Database cache backend requires "
- "the 'sqlalchemy' library")
-
- def __init__(self, namespace, url=None, sa_opts=None, optimistic=False,
- table_name='beaker_cache', data_dir=None, lock_dir=None,
- **params):
- """Creates a database namespace manager
-
- ``url``
- SQLAlchemy compliant db url
- ``sa_opts``
- A dictionary of SQLAlchemy keyword options to initialize the engine
- with.
- ``optimistic``
- Use optimistic session locking; note that this will result in an
- additional select when updating a cache value to compare version
- numbers.
- ``table_name``
- The table name to use in the database for the cache.
- """
- OpenResourceNamespaceManager.__init__(self, namespace)
-
- if sa_opts is None:
- sa_opts = params
-
- if lock_dir:
- self.lock_dir = lock_dir
- elif data_dir:
- self.lock_dir = data_dir + "/container_db_lock"
- if self.lock_dir:
- verify_directory(self.lock_dir)
-
- # Check to see if the table's been created before
- url = url or sa_opts['sa.url']
- table_key = url + table_name
- def make_cache():
- # Check to see if we have a connection pool open already
- meta_key = url + table_name
- def make_meta():
- # SQLAlchemy pops the url, this ensures it sticks around
- # later
- sa_opts['sa.url'] = url
- engine = sa.engine_from_config(sa_opts, 'sa.')
- meta = sa.MetaData()
- meta.bind = engine
- return meta
- meta = DatabaseNamespaceManager.metadatas.get(meta_key, make_meta)
- # Create the table object and cache it now
- cache = sa.Table(table_name, meta,
- sa.Column('id', types.Integer, primary_key=True),
- sa.Column('namespace', types.String(255), nullable=False),
- sa.Column('accessed', types.DateTime, nullable=False),
- sa.Column('created', types.DateTime, nullable=False),
- sa.Column('data', types.PickleType, nullable=False),
- sa.UniqueConstraint('namespace')
- )
- cache.create(checkfirst=True)
- return cache
- self.hash = {}
- self._is_new = False
- self.loaded = False
- self.cache = DatabaseNamespaceManager.tables.get(table_key, make_cache)
-
- def get_access_lock(self):
- return null_synchronizer()
-
- def get_creation_lock(self, key):
- return file_synchronizer(
- identifier ="databasecontainer/funclock/%s" % self.namespace,
- lock_dir = self.lock_dir)
-
- def do_open(self, flags):
- # If we already loaded the data, don't bother loading it again
- if self.loaded:
- self.flags = flags
- return
-
- cache = self.cache
- result = sa.select([cache.c.data],
- cache.c.namespace==self.namespace
- ).execute().fetchone()
- if not result:
- self._is_new = True
- self.hash = {}
- else:
- self._is_new = False
- try:
- self.hash = result['data']
- except (IOError, OSError, EOFError, cPickle.PickleError,
- pickle.PickleError):
- log.debug("Couln't load pickle data, creating new storage")
- self.hash = {}
- self._is_new = True
- self.flags = flags
- self.loaded = True
-
- def do_close(self):
- if self.flags is not None and (self.flags == 'c' or self.flags == 'w'):
- cache = self.cache
- if self._is_new:
- cache.insert().execute(namespace=self.namespace, data=self.hash,
- accessed=datetime.now(),
- created=datetime.now())
- self._is_new = False
- else:
- cache.update(cache.c.namespace==self.namespace).execute(
- data=self.hash, accessed=datetime.now())
- self.flags = None
-
- def do_remove(self):
- cache = self.cache
- cache.delete(cache.c.namespace==self.namespace).execute()
- self.hash = {}
-
- # We can retain the fact that we did a load attempt, but since the
- # file is gone this will be a new namespace should it be saved.
- self._is_new = True
-
- def __getitem__(self, key):
- return self.hash[key]
-
- def __contains__(self, key):
- return self.hash.has_key(key)
-
- def __setitem__(self, key, value):
- self.hash[key] = value
-
- def __delitem__(self, key):
- del self.hash[key]
-
- def keys(self):
- return self.hash.keys()
-
-class DatabaseContainer(Container):
- namespace_manager = DatabaseNamespaceManager
diff --git a/module/lib/beaker/ext/google.py b/module/lib/beaker/ext/google.py
deleted file mode 100644
index dd8380d7f..000000000
--- a/module/lib/beaker/ext/google.py
+++ /dev/null
@@ -1,120 +0,0 @@
-import cPickle
-import logging
-from datetime import datetime
-
-from beaker.container import OpenResourceNamespaceManager, Container
-from beaker.exceptions import InvalidCacheBackendError
-from beaker.synchronization import null_synchronizer
-
-log = logging.getLogger(__name__)
-
-db = None
-
-class GoogleNamespaceManager(OpenResourceNamespaceManager):
- tables = {}
-
- @classmethod
- def _init_dependencies(cls):
- global db
- if db is not None:
- return
- try:
- db = __import__('google.appengine.ext.db').appengine.ext.db
- except ImportError:
- raise InvalidCacheBackendError("Datastore cache backend requires the "
- "'google.appengine.ext' library")
-
- def __init__(self, namespace, table_name='beaker_cache', **params):
- """Creates a datastore namespace manager"""
- OpenResourceNamespaceManager.__init__(self, namespace)
-
- def make_cache():
- table_dict = dict(created=db.DateTimeProperty(),
- accessed=db.DateTimeProperty(),
- data=db.BlobProperty())
- table = type(table_name, (db.Model,), table_dict)
- return table
- self.table_name = table_name
- self.cache = GoogleNamespaceManager.tables.setdefault(table_name, make_cache())
- self.hash = {}
- self._is_new = False
- self.loaded = False
- self.log_debug = logging.DEBUG >= log.getEffectiveLevel()
-
- # Google wants namespaces to start with letters, change the namespace
- # to start with a letter
- self.namespace = 'p%s' % self.namespace
-
- def get_access_lock(self):
- return null_synchronizer()
-
- def get_creation_lock(self, key):
- # NOTE: no real creation lock is used here; one should probably be present
- return null_synchronizer()
-
- def do_open(self, flags):
- # If we already loaded the data, don't bother loading it again
- if self.loaded:
- self.flags = flags
- return
-
- item = self.cache.get_by_key_name(self.namespace)
-
- if not item:
- self._is_new = True
- self.hash = {}
- else:
- self._is_new = False
- try:
- self.hash = cPickle.loads(str(item.data))
- except (IOError, OSError, EOFError, cPickle.PickleError):
- if self.log_debug:
- log.debug("Couln't load pickle data, creating new storage")
- self.hash = {}
- self._is_new = True
- self.flags = flags
- self.loaded = True
-
- def do_close(self):
- if self.flags is not None and (self.flags == 'c' or self.flags == 'w'):
- if self._is_new:
- item = self.cache(key_name=self.namespace)
- item.data = cPickle.dumps(self.hash)
- item.created = datetime.now()
- item.accessed = datetime.now()
- item.put()
- self._is_new = False
- else:
- item = self.cache.get_by_key_name(self.namespace)
- item.data = cPickle.dumps(self.hash)
- item.accessed = datetime.now()
- item.put()
- self.flags = None
-
- def do_remove(self):
- item = self.cache.get_by_key_name(self.namespace)
- item.delete()
- self.hash = {}
-
- # We can retain the fact that we did a load attempt, but since the
- # file is gone this will be a new namespace should it be saved.
- self._is_new = True
-
- def __getitem__(self, key):
- return self.hash[key]
-
- def __contains__(self, key):
- return self.hash.has_key(key)
-
- def __setitem__(self, key, value):
- self.hash[key] = value
-
- def __delitem__(self, key):
- del self.hash[key]
-
- def keys(self):
- return self.hash.keys()
-
-
-class GoogleContainer(Container):
- namespace_class = GoogleNamespaceManager
diff --git a/module/lib/beaker/ext/memcached.py b/module/lib/beaker/ext/memcached.py
deleted file mode 100644
index 96516953f..000000000
--- a/module/lib/beaker/ext/memcached.py
+++ /dev/null
@@ -1,82 +0,0 @@
-from beaker.container import NamespaceManager, Container
-from beaker.exceptions import InvalidCacheBackendError, MissingCacheParameter
-from beaker.synchronization import file_synchronizer, null_synchronizer
-from beaker.util import verify_directory, SyncDict
-import warnings
-
-memcache = None
-
-class MemcachedNamespaceManager(NamespaceManager):
- clients = SyncDict()
-
- @classmethod
- def _init_dependencies(cls):
- global memcache
- if memcache is not None:
- return
- try:
- import pylibmc as memcache
- except ImportError:
- try:
- import cmemcache as memcache
- warnings.warn("cmemcache is known to have serious "
- "concurrency issues; consider using 'memcache' or 'pylibmc'")
- except ImportError:
- try:
- import memcache
- except ImportError:
- raise InvalidCacheBackendError("Memcached cache backend requires either "
- "the 'memcache' or 'cmemcache' library")
-
- def __init__(self, namespace, url=None, data_dir=None, lock_dir=None, **params):
- NamespaceManager.__init__(self, namespace)
-
- if not url:
- raise MissingCacheParameter("url is required")
-
- if lock_dir:
- self.lock_dir = lock_dir
- elif data_dir:
- self.lock_dir = data_dir + "/container_mcd_lock"
- if self.lock_dir:
- verify_directory(self.lock_dir)
-
- self.mc = MemcachedNamespaceManager.clients.get(url, memcache.Client, url.split(';'))
-
- def get_creation_lock(self, key):
- return file_synchronizer(
- identifier="memcachedcontainer/funclock/%s" % self.namespace,
- lock_dir=self.lock_dir)
-
- def _format_key(self, key):
- return self.namespace + '_' + key.replace(' ', '\302\267')
-
- def __getitem__(self, key):
- return self.mc.get(self._format_key(key))
-
- def __contains__(self, key):
- value = self.mc.get(self._format_key(key))
- return value is not None
-
- def has_key(self, key):
- return key in self
-
- def set_value(self, key, value, expiretime=None):
- if expiretime:
- self.mc.set(self._format_key(key), value, time=expiretime)
- else:
- self.mc.set(self._format_key(key), value)
-
- def __setitem__(self, key, value):
- self.set_value(key, value)
-
- def __delitem__(self, key):
- self.mc.delete(self._format_key(key))
-
- def do_remove(self):
- # Note: flush_all() clears the whole memcached server, not just
- # this namespace
- self.mc.flush_all()
-
- def keys(self):
- raise NotImplementedError("Memcache caching does not support iteration of all cache keys")
-
-class MemcachedContainer(Container):
- namespace_class = MemcachedNamespaceManager
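
For reference, wiring this backend up through ``CacheManager`` might look like the
following sketch (a running memcached at the given URL and one of the client libraries
above are assumed; the URL and lock_dir values are placeholders):

    from beaker.cache import CacheManager

    cm = CacheManager(type='ext:memcached', url='127.0.0.1:11211',
                      lock_dir='/tmp/beaker_locks')
    cache = cm.get_cache('my_namespace', expire=300)

    cache.set_value('greeting', 'hello')   # stored as 'my_namespace_greeting'
    assert cache.get_value('greeting') == 'hello'
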
diff --git a/module/lib/beaker/ext/sqla.py b/module/lib/beaker/ext/sqla.py
deleted file mode 100644
index 8c79633c1..000000000
--- a/module/lib/beaker/ext/sqla.py
+++ /dev/null
@@ -1,133 +0,0 @@
-import cPickle
-import logging
-import pickle
-from datetime import datetime
-
-from beaker.container import OpenResourceNamespaceManager, Container
-from beaker.exceptions import InvalidCacheBackendError, MissingCacheParameter
-from beaker.synchronization import file_synchronizer, null_synchronizer
-from beaker.util import verify_directory, SyncDict
-
-
-log = logging.getLogger(__name__)
-
-sa = None
-
-class SqlaNamespaceManager(OpenResourceNamespaceManager):
- binds = SyncDict()
- tables = SyncDict()
-
- @classmethod
- def _init_dependencies(cls):
- global sa
- if sa is not None:
- return
- try:
- import sqlalchemy as sa
- except ImportError:
- raise InvalidCacheBackendError("SQLAlchemy, which is required by "
- "this backend, is not installed")
-
- def __init__(self, namespace, bind, table, data_dir=None, lock_dir=None,
- **kwargs):
- """Create a namespace manager for use with a database table via
- SQLAlchemy.
-
- ``bind``
- SQLAlchemy ``Engine`` or ``Connection`` object
-
- ``table``
- SQLAlchemy ``Table`` object in which to store namespace data.
- This should usually be something created by ``make_cache_table``.
- """
- OpenResourceNamespaceManager.__init__(self, namespace)
-
- if lock_dir:
- self.lock_dir = lock_dir
- elif data_dir:
- self.lock_dir = data_dir + "/container_db_lock"
- if self.lock_dir:
- verify_directory(self.lock_dir)
-
- self.bind = self.__class__.binds.get(str(bind.url), lambda: bind)
- self.table = self.__class__.tables.get('%s:%s' % (bind.url, table.name),
- lambda: table)
- self.hash = {}
- self._is_new = False
- self.loaded = False
-
- def get_access_lock(self):
- return null_synchronizer()
-
- def get_creation_lock(self, key):
- return file_synchronizer(
- identifier ="databasecontainer/funclock/%s" % self.namespace,
- lock_dir=self.lock_dir)
-
- def do_open(self, flags):
- if self.loaded:
- self.flags = flags
- return
- select = sa.select([self.table.c.data],
- (self.table.c.namespace == self.namespace))
- result = self.bind.execute(select).fetchone()
- if not result:
- self._is_new = True
- self.hash = {}
- else:
- self._is_new = False
- try:
- self.hash = result['data']
- except (IOError, OSError, EOFError, cPickle.PickleError,
- pickle.PickleError):
- log.debug("Couln't load pickle data, creating new storage")
- self.hash = {}
- self._is_new = True
- self.flags = flags
- self.loaded = True
-
- def do_close(self):
- if self.flags == 'c' or self.flags == 'w':
- if self._is_new:
- insert = self.table.insert()
- self.bind.execute(insert, namespace=self.namespace, data=self.hash,
- accessed=datetime.now(), created=datetime.now())
- self._is_new = False
- else:
- update = self.table.update(self.table.c.namespace == self.namespace)
- self.bind.execute(update, data=self.hash, accessed=datetime.now())
- self.flags = None
-
- def do_remove(self):
- delete = self.table.delete(self.table.c.namespace == self.namespace)
- self.bind.execute(delete)
- self.hash = {}
- self._is_new = True
-
- def __getitem__(self, key):
- return self.hash[key]
-
- def __contains__(self, key):
- return key in self.hash
-
- def __setitem__(self, key, value):
- self.hash[key] = value
-
- def __delitem__(self, key):
- del self.hash[key]
-
- def keys(self):
- return self.hash.keys()
-
-
-class SqlaContainer(Container):
- namespace_class = SqlaNamespaceManager
-
-def make_cache_table(metadata, table_name='beaker_cache'):
- """Return a ``Table`` object suitable for storing cached values for the
- namespace manager. The table itself is not created; issue
- ``metadata.create_all()`` (or ``table.create()``) separately."""
- return sa.Table(table_name, metadata,
- sa.Column('namespace', sa.String(255), primary_key=True),
- sa.Column('accessed', sa.DateTime, nullable=False),
- sa.Column('created', sa.DateTime, nullable=False),
- sa.Column('data', sa.PickleType, nullable=False))
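
A rough usage sketch for this backend (the SQLite URL, table name, and lock_dir are
placeholders; since ``make_cache_table`` only defines the table, it is created
explicitly here):

    import sqlalchemy as sa

    engine = sa.create_engine('sqlite:///beaker_cache.db')
    metadata = sa.MetaData()
    cache_table = make_cache_table(metadata)
    metadata.create_all(engine)          # actually create the table

    manager = SqlaNamespaceManager('my_namespace', engine, cache_table,
                                   lock_dir='/tmp/beaker_locks')
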
diff --git a/module/lib/beaker/middleware.py b/module/lib/beaker/middleware.py
deleted file mode 100644
index 7ba88b37d..000000000
--- a/module/lib/beaker/middleware.py
+++ /dev/null
@@ -1,165 +0,0 @@
-import warnings
-
-try:
- from paste.registry import StackedObjectProxy
- beaker_session = StackedObjectProxy(name="Beaker Session")
- beaker_cache = StackedObjectProxy(name="Cache Manager")
-except ImportError:
- beaker_cache = None
- beaker_session = None
-
-from beaker.cache import CacheManager
-from beaker.session import Session, SessionObject
-from beaker.util import coerce_cache_params, coerce_session_params, \
- parse_cache_config_options
-
-
-class CacheMiddleware(object):
- cache = beaker_cache
-
- def __init__(self, app, config=None, environ_key='beaker.cache', **kwargs):
- """Initialize the Cache Middleware
-
- The Cache middleware will make a Cache instance available
- on every request under the ``environ['beaker.cache']`` key by
- default. The location in environ can be changed by setting
- ``environ_key``.
-
- ``config``
- dict All settings should be prefixed by 'cache.'. This
- method of passing variables is intended for Paste and other
- setups that accumulate multiple component settings in a
- single dictionary. If config contains *no cache. prefixed
- args*, then *all* of the config options will be used to
- initialize the Cache objects.
-
- ``environ_key``
- Location where the Cache instance will be keyed in the WSGI
- environ
-
- ``**kwargs``
- All keyword arguments are assumed to be cache settings and
- will override any settings found in ``config``
-
- """
- self.app = app
- config = config or {}
-
- self.options = {}
-
- # Update the options with the parsed config
- self.options.update(parse_cache_config_options(config))
-
- # Add any options from kwargs, but leave out the defaults this
- # time
- self.options.update(
- parse_cache_config_options(kwargs, include_defaults=False))
-
- # Assume all keys are intended for cache if none are prefixed with
- # 'cache.'
- if not self.options and config:
- self.options = config
-
- self.options.update(kwargs)
- self.cache_manager = CacheManager(**self.options)
- self.environ_key = environ_key
-
- def __call__(self, environ, start_response):
- if environ.get('paste.registry'):
- if environ['paste.registry'].reglist:
- environ['paste.registry'].register(self.cache,
- self.cache_manager)
- environ[self.environ_key] = self.cache_manager
- return self.app(environ, start_response)
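
A minimal WSGI sketch of the middleware above (the 'cache.'-prefixed keys follow the
convention described in the docstring; all names here are illustrative):

    def app(environ, start_response):
        cache_manager = environ['beaker.cache']       # set by CacheMiddleware
        cache = cache_manager.get_cache('pages', expire=60)
        cache.set_value('last_path', environ.get('PATH_INFO', '/'))
        start_response('200 OK', [('Content-Type', 'text/plain')])
        return ['cached']

    wrapped = CacheMiddleware(app, {'cache.type': 'memory'})
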
-
-
-class SessionMiddleware(object):
- session = beaker_session
-
- def __init__(self, wrap_app, config=None, environ_key='beaker.session',
- **kwargs):
- """Initialize the Session Middleware
-
- The Session middleware will make a lazy session instance
- available on every request under the ``environ['beaker.session']``
- key by default. The location in environ can be changed by
- setting ``environ_key``.
-
- ``config``
- dict All settings should be prefixed by 'session.'. This
- method of passing variables is intended for Paste and other
- setups that accumulate multiple component settings in a
- single dictionary. If config contains *no session. prefixed
- args*, then *all* of the config options will be used to
- initialize the Session objects.
-
- ``environ_key``
- Location where the Session instance will be keyed in the WSGI
- environ
-
- ``**kwargs``
- All keyword arguments are assumed to be session settings and
- will override any settings found in ``config``
-
- """
- config = config or {}
-
- # Load up the default params
- self.options = dict(invalidate_corrupt=True, type=None,
- data_dir=None, key='beaker.session.id',
- timeout=None, secret=None, log_file=None)
-
- # Pull out any config args meant for beaker session. if there are any
- for dct in [config, kwargs]:
- for key, val in dct.iteritems():
- if key.startswith('beaker.session.'):
- self.options[key[15:]] = val
- if key.startswith('session.'):
- self.options[key[8:]] = val
- if key.startswith('session_'):
- warnings.warn('Session options should start with session. '
- 'instead of session_.', DeprecationWarning, 2)
- self.options[key[8:]] = val
-
- # Coerce and validate session params
- coerce_session_params(self.options)
-
- # Assume all keys are intended for the session if none are prefixed
- # with 'session.'
- if not self.options and config:
- self.options = config
-
- self.options.update(kwargs)
- self.wrap_app = wrap_app
- self.environ_key = environ_key
-
- def __call__(self, environ, start_response):
- session = SessionObject(environ, **self.options)
- if environ.get('paste.registry'):
- if environ['paste.registry'].reglist:
- environ['paste.registry'].register(self.session, session)
- environ[self.environ_key] = session
- environ['beaker.get_session'] = self._get_session
-
- def session_start_response(status, headers, exc_info = None):
- if session.accessed():
- session.persist()
- if session.__dict__['_headers']['set_cookie']:
- cookie = session.__dict__['_headers']['cookie_out']
- if cookie:
- headers.append(('Set-Cookie', cookie))
- return start_response(status, headers, exc_info)
- return self.wrap_app(environ, session_start_response)
-
- def _get_session(self):
- return Session({}, use_cookies=False, **self.options)
-
-
-def session_filter_factory(global_conf, **kwargs):
- def filter(app):
- return SessionMiddleware(app, global_conf, **kwargs)
- return filter
-
-
-def session_filter_app_factory(app, global_conf, **kwargs):
- return SessionMiddleware(app, global_conf, **kwargs)
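
And a matching sketch for the session side (file-backed sessions; the paths are
placeholders):

    def app(environ, start_response):
        session = environ['beaker.session']   # a lazy SessionObject
        session['visits'] = session.get('visits', 0) + 1
        session.save()                        # mark dirty; persisted at response time
        start_response('200 OK', [('Content-Type', 'text/plain')])
        return ['visit %d' % session['visits']]

    wrapped = SessionMiddleware(app, {'session.type': 'file',
                                      'session.data_dir': '/tmp/session_data'})
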
diff --git a/module/lib/beaker/session.py b/module/lib/beaker/session.py
deleted file mode 100644
index 7d465530b..000000000
--- a/module/lib/beaker/session.py
+++ /dev/null
@@ -1,618 +0,0 @@
-import Cookie
-import os
-import random
-import time
-from datetime import datetime, timedelta
-
-from beaker.crypto import hmac as HMAC, hmac_sha1 as SHA1, md5
-from beaker.util import pickle
-
-from beaker import crypto
-from beaker.cache import clsmap
-from beaker.exceptions import BeakerException, InvalidCryptoBackendError
-from base64 import b64encode, b64decode
-
-
-__all__ = ['SignedCookie', 'Session']
-
-getpid = hasattr(os, 'getpid') and os.getpid or (lambda: '')
-
-class SignedCookie(Cookie.BaseCookie):
- """Extends python cookie to give digital signature support"""
- def __init__(self, secret, input=None):
- self.secret = secret
- Cookie.BaseCookie.__init__(self, input)
-
- def value_decode(self, val):
- val = val.strip('"')
- sig = HMAC.new(self.secret, val[40:], SHA1).hexdigest()
-
- # Avoid timing attacks
- invalid_bits = 0
- input_sig = val[:40]
- if len(sig) != len(input_sig):
- return None, val
-
- for a, b in zip(sig, input_sig):
- invalid_bits += a != b
-
- if invalid_bits:
- return None, val
- else:
- return val[40:], val
-
- def value_encode(self, val):
- sig = HMAC.new(self.secret, val, SHA1).hexdigest()
- return str(val), ("%s%s" % (sig, val))
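
The coded value is thus the 40-character hex HMAC-SHA1 digest followed by the payload,
and verification walks both digests in full so the comparison time does not reveal how
many leading characters matched. A round-trip sketch (secret and values illustrative):

    out = SignedCookie('my-secret')
    out['beaker.session.id'] = 'abc123'
    header = out['beaker.session.id'].OutputString()  # 'beaker.session.id=<40-hex-sig>abc123'

    back = SignedCookie('my-secret', input=header)
    assert back['beaker.session.id'].value == 'abc123'  # None instead, if tampered with
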
-
-
-class Session(dict):
- """Session object that uses container package for storage.
-
- ``key``
- The name the cookie should be set to.
- ``timeout``
- How long session data is considered valid. This is used
- regardless of the cookie being present or not to determine
- whether session data is still valid.
- ``cookie_domain``
- Domain to use for the cookie.
- ``secure``
- Whether or not the cookie should only be sent over SSL.
- """
- def __init__(self, request, id=None, invalidate_corrupt=False,
- use_cookies=True, type=None, data_dir=None,
- key='beaker.session.id', timeout=None, cookie_expires=True,
- cookie_domain=None, secret=None, secure=False,
- namespace_class=None, **namespace_args):
- if not type:
- if data_dir:
- self.type = 'file'
- else:
- self.type = 'memory'
- else:
- self.type = type
-
- self.namespace_class = namespace_class or clsmap[self.type]
-
- self.namespace_args = namespace_args
-
- self.request = request
- self.data_dir = data_dir
- self.key = key
-
- self.timeout = timeout
- self.use_cookies = use_cookies
- self.cookie_expires = cookie_expires
-
- # Default cookie domain/path
- self._domain = cookie_domain
- self._path = '/'
- self.was_invalidated = False
- self.secret = secret
- self.secure = secure
- self.id = id
- self.accessed_dict = {}
-
- if self.use_cookies:
- cookieheader = request.get('cookie', '')
- if secret:
- try:
- self.cookie = SignedCookie(secret, input=cookieheader)
- except Cookie.CookieError:
- self.cookie = SignedCookie(secret, input=None)
- else:
- self.cookie = Cookie.SimpleCookie(input=cookieheader)
-
- if not self.id and self.key in self.cookie:
- self.id = self.cookie[self.key].value
-
- self.is_new = self.id is None
- if self.is_new:
- self._create_id()
- self['_accessed_time'] = self['_creation_time'] = time.time()
- else:
- try:
- self.load()
- except Exception:
- if invalidate_corrupt:
- self.invalidate()
- else:
- raise
-
- def _create_id(self):
- self.id = md5(
- md5("%f%s%f%s" % (time.time(), id({}), random.random(),
- getpid())).hexdigest(),
- ).hexdigest()
- self.is_new = True
- self.last_accessed = None
- if self.use_cookies:
- self.cookie[self.key] = self.id
- if self._domain:
- self.cookie[self.key]['domain'] = self._domain
- if self.secure:
- self.cookie[self.key]['secure'] = True
- self.cookie[self.key]['path'] = self._path
- if self.cookie_expires is not True:
- if self.cookie_expires is False:
- expires = datetime.fromtimestamp(0x7FFFFFFF)
- elif isinstance(self.cookie_expires, timedelta):
- expires = datetime.today() + self.cookie_expires
- elif isinstance(self.cookie_expires, datetime):
- expires = self.cookie_expires
- else:
- raise ValueError("Invalid argument for cookie_expires: %s"
- % repr(self.cookie_expires))
- self.cookie[self.key]['expires'] = \
- expires.strftime("%a, %d-%b-%Y %H:%M:%S GMT" )
- self.request['cookie_out'] = self.cookie[self.key].output(header='')
- self.request['set_cookie'] = False
-
- def created(self):
- return self['_creation_time']
- created = property(created)
-
- def _set_domain(self, domain):
- self['_domain'] = domain
- self.cookie[self.key]['domain'] = domain
- self.request['cookie_out'] = self.cookie[self.key].output(header='')
- self.request['set_cookie'] = True
-
- def _get_domain(self):
- return self._domain
-
- domain = property(_get_domain, _set_domain)
-
- def _set_path(self, path):
- self['_path'] = path
- self.cookie[self.key]['path'] = path
- self.request['cookie_out'] = self.cookie[self.key].output(header='')
- self.request['set_cookie'] = True
-
- def _get_path(self):
- return self._path
-
- path = property(_get_path, _set_path)
-
- def _delete_cookie(self):
- self.request['set_cookie'] = True
- self.cookie[self.key] = self.id
- if self._domain:
- self.cookie[self.key]['domain'] = self._domain
- if self.secure:
- self.cookie[self.key]['secure'] = True
- self.cookie[self.key]['path'] = '/'
- expires = datetime.today().replace(year=2003)
- self.cookie[self.key]['expires'] = \
- expires.strftime("%a, %d-%b-%Y %H:%M:%S GMT" )
- self.request['cookie_out'] = self.cookie[self.key].output(header='')
- self.request['set_cookie'] = True
-
- def delete(self):
- """Deletes the session from the persistent storage, and sends
- an expired cookie out"""
- if self.use_cookies:
- self._delete_cookie()
- self.clear()
-
- def invalidate(self):
- """Invalidates this session, creates a new session id, returns
- to the is_new state"""
- self.clear()
- self.was_invalidated = True
- self._create_id()
- self.load()
-
- def load(self):
- "Loads the data from this session from persistent storage"
- self.namespace = self.namespace_class(self.id,
- data_dir=self.data_dir, digest_filenames=False,
- **self.namespace_args)
- now = time.time()
- self.request['set_cookie'] = True
-
- self.namespace.acquire_read_lock()
- timed_out = False
- try:
- self.clear()
- try:
- session_data = self.namespace['session']
-
- # Memcached always returns a value; it's None when the key
- # is not present
- if session_data is None:
- session_data = {
- '_creation_time':now,
- '_accessed_time':now
- }
- self.is_new = True
- except (KeyError, TypeError):
- session_data = {
- '_creation_time':now,
- '_accessed_time':now
- }
- self.is_new = True
-
- if self.timeout is not None and \
- now - session_data['_accessed_time'] > self.timeout:
- timed_out = True
- else:
- # Properly set the last_accessed time, which is different
- # from the *current* _accessed_time
- if self.is_new or '_accessed_time' not in session_data:
- self.last_accessed = None
- else:
- self.last_accessed = session_data['_accessed_time']
-
- # Update the current _accessed_time
- session_data['_accessed_time'] = now
- self.update(session_data)
- self.accessed_dict = session_data.copy()
- finally:
- self.namespace.release_read_lock()
- if timed_out:
- self.invalidate()
-
- def save(self, accessed_only=False):
- """Saves the data for this session to persistent storage
-
- If accessed_only is True, then only the original data loaded
- at the beginning of the request will be saved, with the updated
- last accessed time.
-
- """
- # Check whether it's a new session that was only accessed;
- # don't save it in that case
- if accessed_only and self.is_new:
- return None
-
- if not hasattr(self, 'namespace'):
- self.namespace = self.namespace_class(
- self.id,
- data_dir=self.data_dir,
- digest_filenames=False,
- **self.namespace_args)
-
- self.namespace.acquire_write_lock()
- try:
- if accessed_only:
- data = dict(self.accessed_dict.items())
- else:
- data = dict(self.items())
-
- # Save the data
- if not data and 'session' in self.namespace:
- del self.namespace['session']
- else:
- self.namespace['session'] = data
- finally:
- self.namespace.release_write_lock()
- if self.is_new:
- self.request['set_cookie'] = True
-
- def revert(self):
- """Revert the session to its original state from its first
- access in the request"""
- self.clear()
- self.update(self.accessed_dict)
-
- # TODO: I think both these methods should be removed. They're from
- # the original mod_python code i was ripping off but they really
- # have no use here.
- def lock(self):
- """Locks this session against other processes/threads. This is
- automatic when load/save is called.
-
- ***use with caution*** and always with a corresponding 'unlock'
- inside a "finally:" block, as a stray lock typically cannot be
- unlocked without shutting down the whole application.
-
- """
- self.namespace.acquire_write_lock()
-
- def unlock(self):
- """Unlocks this session against other processes/threads. This
- is automatic when load/save is called.
-
- ***use with caution*** and always within a "finally:" block, as
- a stray lock typically cannot be unlocked without shutting down
- the whole application.
-
- """
- self.namespace.release_write_lock()
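
Session is driven by a plain request mapping rather than a full WSGI environ, which
makes direct use easy to sketch (memory-backed, so nothing touches disk; values are
illustrative):

    req = {'cookie': ''}                  # incoming Cookie header, if any
    sess = Session(req, type='memory', timeout=300)
    sess['user_id'] = 42
    sess.save()
    set_cookie_value = req['cookie_out']  # payload for the response's Set-Cookie header
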
-
-class CookieSession(Session):
- """Pure cookie-based session
-
- Options recognized when using cookie-based sessions are slightly
- more restricted than general sessions.
-
- ``key``
- The name the cookie should be set to.
- ``timeout``
- How long session data is considered valid. This is used
- regardless of the cookie being present or not to determine
- whether session data is still valid.
- ``encrypt_key``
- The key to use for the session encryption, if not provided the
- session will not be encrypted.
- ``validate_key``
- The key used to sign the encrypted session
- ``cookie_domain``
- Domain to use for the cookie.
- ``secure``
- Whether or not the cookie should only be sent over SSL.
-
- """
- def __init__(self, request, key='beaker.session.id', timeout=None,
- cookie_expires=True, cookie_domain=None, encrypt_key=None,
- validate_key=None, secure=False, **kwargs):
-
- if not crypto.has_aes and encrypt_key:
- raise InvalidCryptoBackendError("No AES library is installed, can't generate "
- "encrypted cookie-only Session.")
-
- self.request = request
- self.key = key
- self.timeout = timeout
- self.cookie_expires = cookie_expires
- self.encrypt_key = encrypt_key
- self.validate_key = validate_key
- self.request['set_cookie'] = False
- self.secure = secure
- self._domain = cookie_domain
- self._path = '/'
-
- try:
- cookieheader = request['cookie']
- except KeyError:
- cookieheader = ''
-
- if validate_key is None:
- raise BeakerException("No validate_key specified for Cookie only "
- "Session.")
-
- try:
- self.cookie = SignedCookie(validate_key, input=cookieheader)
- except Cookie.CookieError:
- self.cookie = SignedCookie(validate_key, input=None)
-
- self['_id'] = self._make_id()
- self.is_new = True
-
- # If we have a cookie, load it
- if self.key in self.cookie and self.cookie[self.key].value is not None:
- self.is_new = False
- try:
- self.update(self._decrypt_data())
- except Exception:
- pass
- if self.timeout is not None and time.time() - \
- self['_accessed_time'] > self.timeout:
- self.clear()
- self.accessed_dict = self.copy()
- self._create_cookie()
-
- def created(self):
- return self['_creation_time']
- created = property(created)
-
- def id(self):
- return self['_id']
- id = property(id)
-
- def _set_domain(self, domain):
- self['_domain'] = domain
- self._domain = domain
-
- def _get_domain(self):
- return self._domain
-
- domain = property(_get_domain, _set_domain)
-
- def _set_path(self, path):
- self['_path'] = path
- self._path = path
-
- def _get_path(self):
- return self._path
-
- path = property(_get_path, _set_path)
-
- def _encrypt_data(self):
- """Serialize, encipher, and base64 the session dict"""
- if self.encrypt_key:
- nonce = b64encode(os.urandom(40))[:8]
- encrypt_key = crypto.generateCryptoKeys(self.encrypt_key,
- self.validate_key + nonce, 1)
- data = pickle.dumps(self.copy(), 2)
- return nonce + b64encode(crypto.aesEncrypt(data, encrypt_key))
- else:
- data = pickle.dumps(self.copy(), 2)
- return b64encode(data)
-
- def _decrypt_data(self):
- """Bas64, decipher, then un-serialize the data for the session
- dict"""
- if self.encrypt_key:
- nonce = self.cookie[self.key].value[:8]
- encrypt_key = crypto.generateCryptoKeys(self.encrypt_key,
- self.validate_key + nonce, 1)
- payload = b64decode(self.cookie[self.key].value[8:])
- data = crypto.aesDecrypt(payload, encrypt_key)
- return pickle.loads(data)
- else:
- data = b64decode(self.cookie[self.key].value)
- return pickle.loads(data)
-
- def _make_id(self):
- return md5(md5(
- "%f%s%f%s" % (time.time(), id({}), random.random(), getpid())
- ).hexdigest()
- ).hexdigest()
-
- def save(self, accessed_only=False):
- """Saves the data for this session to persistent storage"""
- if accessed_only and self.is_new:
- return
- if accessed_only:
- self.clear()
- self.update(self.accessed_dict)
- self._create_cookie()
-
- def expire(self):
- """Delete the 'expires' attribute on this Session, if any."""
-
- self.pop('_expires', None)
-
- def _create_cookie(self):
- if '_creation_time' not in self:
- self['_creation_time'] = time.time()
- if '_id' not in self:
- self['_id'] = self._make_id()
- self['_accessed_time'] = time.time()
-
- if self.cookie_expires is not True:
- if self.cookie_expires is False:
- expires = datetime.fromtimestamp(0x7FFFFFFF)
- elif isinstance(self.cookie_expires, timedelta):
- expires = datetime.today() + self.cookie_expires
- elif isinstance(self.cookie_expires, datetime):
- expires = self.cookie_expires
- else:
- raise ValueError("Invalid argument for cookie_expires: %s"
- % repr(self.cookie_expires))
- self['_expires'] = expires
- elif '_expires' in self:
- expires = self['_expires']
- else:
- expires = None
-
- val = self._encrypt_data()
- if len(val) > 4064:
- raise BeakerException("Cookie value is too long to store")
-
- self.cookie[self.key] = val
- if '_domain' in self:
- self.cookie[self.key]['domain'] = self['_domain']
- elif self._domain:
- self.cookie[self.key]['domain'] = self._domain
- if self.secure:
- self.cookie[self.key]['secure'] = True
-
- self.cookie[self.key]['path'] = self.get('_path', '/')
-
- if expires:
- self.cookie[self.key]['expires'] = \
- expires.strftime("%a, %d-%b-%Y %H:%M:%S GMT" )
- self.request['cookie_out'] = self.cookie[self.key].output(header='')
- self.request['set_cookie'] = True
-
- def delete(self):
- """Delete the cookie, and clear the session"""
- # Send a delete cookie request
- self._delete_cookie()
- self.clear()
-
- def invalidate(self):
- """Clear the contents and start a new session"""
- self.delete()
- self['_id'] = self._make_id()
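
Since all state lives in the cookie itself, a CookieSession needs keys rather than
storage. A sketch (``validate_key`` is required; pass ``encrypt_key`` as well only if
an AES backend is installed; values are illustrative):

    req = {'cookie': ''}
    sess = CookieSession(req, validate_key='signing-secret')
    sess['user'] = 'alice'
    sess.save()
    cookie_payload = req['cookie_out']    # signed, base64-encoded session dict
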
-
-
-class SessionObject(object):
- """Session proxy/lazy creator
-
- This object proxies access to the actual session object, so that in
- the case that the session hasn't been used before, it will be
- set up. This avoids creating and loading the session from persistent
- storage unless it's actually used during the request.
-
- """
- def __init__(self, environ, **params):
- self.__dict__['_params'] = params
- self.__dict__['_environ'] = environ
- self.__dict__['_sess'] = None
- self.__dict__['_headers'] = []
-
- def _session(self):
- """Lazy initial creation of session object"""
- if self.__dict__['_sess'] is None:
- params = self.__dict__['_params']
- environ = self.__dict__['_environ']
- self.__dict__['_headers'] = req = {'cookie_out':None}
- req['cookie'] = environ.get('HTTP_COOKIE')
- if params.get('type') == 'cookie':
- self.__dict__['_sess'] = CookieSession(req, **params)
- else:
- self.__dict__['_sess'] = Session(req, use_cookies=True,
- **params)
- return self.__dict__['_sess']
-
- def __getattr__(self, attr):
- return getattr(self._session(), attr)
-
- def __setattr__(self, attr, value):
- setattr(self._session(), attr, value)
-
- def __delattr__(self, name):
- self._session().__delattr__(name)
-
- def __getitem__(self, key):
- return self._session()[key]
-
- def __setitem__(self, key, value):
- self._session()[key] = value
-
- def __delitem__(self, key):
- self._session().__delitem__(key)
-
- def __repr__(self):
- return self._session().__repr__()
-
- def __iter__(self):
- """Only works for proxying to a dict"""
- return iter(self._session().keys())
-
- def __contains__(self, key):
- return key in self._session()
-
- def get_by_id(self, id):
- """Loads a session given a session ID"""
- params = self.__dict__['_params']
- session = Session({}, use_cookies=False, id=id, **params)
- if session.is_new:
- return None
- return session
-
- def save(self):
- self.__dict__['_dirty'] = True
-
- def delete(self):
- self.__dict__['_dirty'] = True
- self._session().delete()
-
- def persist(self):
- """Persist the session to the storage
-
- If it's set to autosave, then the entire session will be saved
- regardless of whether save() has been called. Otherwise, just the
- accessed time will be updated if save() was not called, or
- the session will be saved if save() was called.
-
- """
- if self.__dict__['_params'].get('auto'):
- self._session().save()
- else:
- if self.__dict__.get('_dirty'):
- self._session().save()
- else:
- self._session().save(accessed_only=True)
-
- def dirty(self):
- return self.__dict__.get('_dirty', False)
-
- def accessed(self):
- """Returns whether or not the session has been accessed"""
- return self.__dict__['_sess'] is not None
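
The laziness is observable: until the first attribute or item access, no Session is
constructed and ``accessed()`` stays False. A sketch:

    environ = {'HTTP_COOKIE': ''}
    sess = SessionObject(environ, type='memory')
    assert not sess.accessed()    # nothing created yet

    sess['counter'] = 1           # first access builds the real Session
    assert sess.accessed()
    sess.save()                   # mark dirty
    sess.persist()                # actually write it out
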
diff --git a/module/lib/beaker/synchronization.py b/module/lib/beaker/synchronization.py
deleted file mode 100644
index 761303707..000000000
--- a/module/lib/beaker/synchronization.py
+++ /dev/null
@@ -1,381 +0,0 @@
-"""Synchronization functions.
-
-File- and mutex-based mutual exclusion synchronizers are provided,
-as well as a name-based mutex which locks within an application
-based on a string name.
-
-"""
-
-import os
-import sys
-import tempfile
-
-try:
- import threading as _threading
-except ImportError:
- import dummy_threading as _threading
-
-# check for fcntl module
-try:
- sys.getwindowsversion()
- has_flock = False
-except AttributeError:
- try:
- import fcntl
- has_flock = True
- except ImportError:
- has_flock = False
-
-from beaker import util
-from beaker.exceptions import LockError
-
-__all__ = ["file_synchronizer", "mutex_synchronizer", "null_synchronizer",
- "NameLock", "_threading"]
-
-
-class NameLock(object):
- """a proxy for an RLock object that is stored in a name based
- registry.
-
- Multiple threads can get a reference to the same RLock based on the
- name alone, and synchronize operations related to that name.
-
- """
- locks = util.WeakValuedRegistry()
-
- class NLContainer(object):
- def __init__(self, reentrant):
- if reentrant:
- self.lock = _threading.RLock()
- else:
- self.lock = _threading.Lock()
- def __call__(self):
- return self.lock
-
- def __init__(self, identifier=None, reentrant=False):
- if identifier is None:
- self._lock = NameLock.NLContainer(reentrant)
- else:
- self._lock = NameLock.locks.get(identifier, NameLock.NLContainer,
- reentrant)
-
- def acquire(self, wait=True):
- return self._lock().acquire(wait)
-
- def release(self):
- self._lock().release()
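
Two NameLock instances built with the same identifier share one underlying lock for as
long as a strong reference keeps it alive in the weak registry, so unrelated code can
synchronize on a string name alone. A sketch (the name is illustrative):

    a = NameLock('resource:42', reentrant=True)
    b = NameLock('resource:42')   # same underlying lock, via the registry

    a.acquire()
    try:
        pass                      # critical section for 'resource:42'
    finally:
        a.release()
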
-
-
-_synchronizers = util.WeakValuedRegistry()
-def _synchronizer(identifier, cls, **kwargs):
- return _synchronizers.sync_get((identifier, cls), cls, identifier, **kwargs)
-
-
-def file_synchronizer(identifier, **kwargs):
- if not has_flock or 'lock_dir' not in kwargs:
- return mutex_synchronizer(identifier)
- else:
- return _synchronizer(identifier, FileSynchronizer, **kwargs)
-
-
-def mutex_synchronizer(identifier, **kwargs):
- return _synchronizer(identifier, ConditionSynchronizer, **kwargs)
-
-
-class null_synchronizer(object):
- def acquire_write_lock(self, wait=True):
- return True
- def acquire_read_lock(self):
- pass
- def release_write_lock(self):
- pass
- def release_read_lock(self):
- pass
- acquire = acquire_write_lock
- release = release_write_lock
-
-
-class SynchronizerImpl(object):
- def __init__(self):
- self._state = util.ThreadLocal()
-
- class SyncState(object):
- __slots__ = 'reentrantcount', 'writing', 'reading'
-
- def __init__(self):
- self.reentrantcount = 0
- self.writing = False
- self.reading = False
-
- def state(self):
- if not self._state.has():
- state = SynchronizerImpl.SyncState()
- self._state.put(state)
- return state
- else:
- return self._state.get()
- state = property(state)
-
- def release_read_lock(self):
- state = self.state
-
- if state.writing:
- raise LockError("lock is in writing state")
- if not state.reading:
- raise LockError("lock is not in reading state")
-
- if state.reentrantcount == 1:
- self.do_release_read_lock()
- state.reading = False
-
- state.reentrantcount -= 1
-
- def acquire_read_lock(self, wait=True):
- state = self.state
-
- if state.writing:
- raise LockError("lock is in writing state")
-
- if state.reentrantcount == 0:
- x = self.do_acquire_read_lock(wait)
- if wait or x:
- state.reentrantcount += 1
- state.reading = True
- return x
- elif state.reading:
- state.reentrantcount += 1
- return True
-
- def release_write_lock(self):
- state = self.state
-
- if state.reading:
- raise LockError("lock is in reading state")
- if not state.writing:
- raise LockError("lock is not in writing state")
-
- if state.reentrantcount == 1:
- self.do_release_write_lock()
- state.writing = False
-
- state.reentrantcount -= 1
-
- release = release_write_lock
-
- def acquire_write_lock(self, wait=True):
- state = self.state
-
- if state.reading:
- raise LockError("lock is in reading state")
-
- if state.reentrantcount == 0:
- x = self.do_acquire_write_lock(wait)
- if wait or x:
- state.reentrantcount += 1
- state.writing = True
- return x
- elif state.writing:
- state.reentrantcount += 1
- return True
-
- acquire = acquire_write_lock
-
- def do_release_read_lock(self):
- raise NotImplementedError()
-
- def do_acquire_read_lock(self, wait):
- raise NotImplementedError()
-
- def do_release_write_lock(self):
- raise NotImplementedError()
-
- def do_acquire_write_lock(self, wait):
- raise NotImplementedError()
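
The reentrant counting means a thread may nest acquires of the same kind but may not
hold read and write locks at once; mixing them raises LockError. A sketch using the
in-process condition variant (the identifier is illustrative):

    sync = mutex_synchronizer('some-resource')

    sync.acquire_read_lock()
    sync.acquire_read_lock()      # reentrant: the count goes to 2
    sync.release_read_lock()
    sync.release_read_lock()

    sync.acquire_write_lock()
    try:
        pass                      # exclusive section
    finally:
        sync.release_write_lock()
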
-
-
-class FileSynchronizer(SynchronizerImpl):
- """a synchronizer which locks using flock().
-
- Adapted for Python/multithreads from Apache::Session::Lock::File,
- http://search.cpan.org/src/CWEST/Apache-Session-1.81/Session/Lock/File.pm
-
- This module does not unlink temporary files,
- because it interferes with proper locking. This can cause
- problems on certain systems (Linux) whose file systems (ext2) do not
- perform well with lots of files in one directory. To prevent this
- you should use a script to clean out old files from your lock directory.
-
- """
- def __init__(self, identifier, lock_dir):
- super(FileSynchronizer, self).__init__()
- self._filedescriptor = util.ThreadLocal()
-
- if lock_dir is None:
- lock_dir = tempfile.gettempdir()
-
- self.filename = util.encoded_path(
- lock_dir,
- [identifier],
- extension='.lock'
- )
-
- def _filedesc(self):
- return self._filedescriptor.get()
- _filedesc = property(_filedesc)
-
- def _open(self, mode):
- filedescriptor = self._filedesc
- if filedescriptor is None:
- filedescriptor = os.open(self.filename, mode)
- self._filedescriptor.put(filedescriptor)
- return filedescriptor
-
- def do_acquire_read_lock(self, wait):
- filedescriptor = self._open(os.O_CREAT | os.O_RDONLY)
- if not wait:
- try:
- fcntl.flock(filedescriptor, fcntl.LOCK_SH | fcntl.LOCK_NB)
- return True
- except IOError:
- os.close(filedescriptor)
- self._filedescriptor.remove()
- return False
- else:
- fcntl.flock(filedescriptor, fcntl.LOCK_SH)
- return True
-
- def do_acquire_write_lock(self, wait):
- filedescriptor = self._open(os.O_CREAT | os.O_WRONLY)
- if not wait:
- try:
- fcntl.flock(filedescriptor, fcntl.LOCK_EX | fcntl.LOCK_NB)
- return True
- except IOError:
- os.close(filedescriptor)
- self._filedescriptor.remove()
- return False
- else:
- fcntl.flock(filedescriptor, fcntl.LOCK_EX)
- return True
-
- def do_release_read_lock(self):
- self._release_all_locks()
-
- def do_release_write_lock(self):
- self._release_all_locks()
-
- def _release_all_locks(self):
- filedescriptor = self._filedesc
- if filedescriptor is not None:
- fcntl.flock(filedescriptor, fcntl.LOCK_UN)
- os.close(filedescriptor)
- self._filedescriptor.remove()
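
Cross-process locking goes through the ``file_synchronizer`` factory, which falls back
to the in-process ConditionSynchronizer when flock or a lock_dir is unavailable. A
sketch (the identifier and lock_dir path are placeholders):

    sync = file_synchronizer('container/funclock/my_namespace',
                             lock_dir='/tmp/beaker_locks')
    sync.acquire_write_lock()     # flock(LOCK_EX) on the generated .lock file
    try:
        pass                      # exclusive across processes on this host
    finally:
        sync.release_write_lock()
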
-
-
-class ConditionSynchronizer(SynchronizerImpl):
- """a synchronizer using a Condition."""
-
- def __init__(self, identifier):
- super(ConditionSynchronizer, self).__init__()
-
- # counts how many asynchronous methods are executing
- self.async = 0
-
- # pointer to thread that is the current sync operation
- self.current_sync_operation = None
-
- # condition object to lock on
- self.condition = _threading.Condition(_threading.Lock())
-
- def do_acquire_read_lock(self, wait=True):
- self.condition.acquire()
- try:
- # see if a synchronous operation is waiting to start
- # or is already running, in which case we wait (or just
- # give up and return)
- if wait:
- while self.current_sync_operation is not None:
- self.condition.wait()
- else:
- if self.current_sync_operation is not None:
- return False
-
- self.async += 1
- finally:
- self.condition.release()
-
- if not wait:
- return True
-
- def do_release_read_lock(self):
- self.condition.acquire()
- try:
- self.async -= 1
-
- # check if we are the last asynchronous reader thread
- # out the door.
- if self.async == 0:
- # yes. so if a sync operation is waiting, notifyAll to wake
- # it up
- if self.current_sync_operation is not None:
- self.condition.notifyAll()
- elif self.async < 0:
- raise LockError("Synchronizer error - too many "
- "release_read_locks called")
- finally:
- self.condition.release()
-
- def do_acquire_write_lock(self, wait=True):
- self.condition.acquire()
- try:
- # here, we are not a synchronous reader, and after returning,
- # assuming waiting or immediate availability, we will be.
-
- if wait:
- # if another sync is working, wait
- while self.current_sync_operation is not None:
- self.condition.wait()
- else:
- # if another sync is working,
- # we don't want to wait, so forget it
- if self.current_sync_operation is not None:
- return False
-
- # establish ourselves as the current sync
- # this indicates to other read/write operations
- # that they should wait until this is None again
- self.current_sync_operation = _threading.currentThread()
-
- # now wait again for asyncs to finish
- if self.async > 0:
- if wait:
- # wait
- self.condition.wait()
- else:
- # we don't want to wait, so forget it
- self.current_sync_operation = None
- return False
- finally:
- self.condition.release()
-
- if not wait:
- return True
-
- def do_release_write_lock(self):
- self.condition.acquire()
- try:
- if self.current_sync_operation is not _threading.currentThread():
- raise LockError("Synchronizer error - current thread doesnt "
- "have the write lock")
-
- # reset the current sync operation so
- # another can get it
- self.current_sync_operation = None
-
- # tell everyone to get ready
- self.condition.notifyAll()
- finally:
- # everyone go !!
- self.condition.release()
diff --git a/module/lib/beaker/util.py b/module/lib/beaker/util.py
deleted file mode 100644
index 04c9617c5..000000000
--- a/module/lib/beaker/util.py
+++ /dev/null
@@ -1,302 +0,0 @@
-"""Beaker utilities"""
-
-try:
- import thread as _thread
- import threading as _threading
-except ImportError:
- import dummy_thread as _thread
- import dummy_threading as _threading
-
-from datetime import datetime, timedelta
-import os
-import string
-import types
-import weakref
-import warnings
-import sys
-
-py3k = getattr(sys, 'py3kwarning', False) or sys.version_info >= (3, 0)
-py24 = sys.version_info < (2,5)
-jython = sys.platform.startswith('java')
-
-if py3k or jython:
- import pickle
-else:
- import cPickle as pickle
-
-from beaker.converters import asbool
-from threading import local as _tlocal
-
-
-__all__ = ["ThreadLocal", "Registry", "WeakValuedRegistry", "SyncDict",
- "encoded_path", "verify_directory"]
-
-
-def verify_directory(dir):
- """verifies and creates a directory. tries to
- ignore collisions with other threads and processes."""
-
- tries = 0
- while not os.access(dir, os.F_OK):
- try:
- tries += 1
- os.makedirs(dir)
- except OSError:
- if tries > 5:
- raise
-
-
-def deprecated(message):
- def wrapper(fn):
- def deprecated_method(*args, **kargs):
- warnings.warn(message, DeprecationWarning, 2)
- return fn(*args, **kargs)
- # TODO: use the decorator module or functools.wraps?
- deprecated_method.__name__ = fn.__name__
- deprecated_method.__doc__ = "%s\n\n%s" % (message, fn.__doc__)
- return deprecated_method
- return wrapper
-
-class ThreadLocal(object):
- """stores a value on a per-thread basis"""
-
- __slots__ = '_tlocal'
-
- def __init__(self):
- self._tlocal = _tlocal()
-
- def put(self, value):
- self._tlocal.value = value
-
- def has(self):
- return hasattr(self._tlocal, 'value')
-
- def get(self, default=None):
- return getattr(self._tlocal, 'value', default)
-
- def remove(self):
- del self._tlocal.value
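
A quick behavioral sketch of ThreadLocal:

    tl = ThreadLocal()
    assert not tl.has()
    tl.put('per-thread value')
    assert tl.get() == 'per-thread value'   # other threads still see has() == False
    tl.remove()
    assert tl.get('fallback') == 'fallback'
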
-
-class SyncDict(object):
- """
- An efficient/threadsafe singleton map algorithm, a.k.a.
- "get a value based on this key, and create if not found or not
- valid" paradigm:
-
- exists && isvalid ? get : create
-
- Designed to work with weakref dictionaries, in which items
- may asynchronously disappear from the dictionary.
-
- Use Python 2.3.3 or greater! A major garbage-collection/weakref
- bug affecting this code was fixed in November 2003.
-
- """
- def __init__(self):
- self.mutex = _thread.allocate_lock()
- self.dict = {}
-
- def get(self, key, createfunc, *args, **kwargs):
- try:
- if self.has_key(key):
- return self.dict[key]
- else:
- return self.sync_get(key, createfunc, *args, **kwargs)
- except KeyError:
- return self.sync_get(key, createfunc, *args, **kwargs)
-
- def sync_get(self, key, createfunc, *args, **kwargs):
- self.mutex.acquire()
- try:
- try:
- if self.has_key(key):
- return self.dict[key]
- else:
- return self._create(key, createfunc, *args, **kwargs)
- except KeyError:
- return self._create(key, createfunc, *args, **kwargs)
- finally:
- self.mutex.release()
-
- def _create(self, key, createfunc, *args, **kwargs):
- self[key] = obj = createfunc(*args, **kwargs)
- return obj
-
- def has_key(self, key):
- return key in self.dict
-
- def __contains__(self, key):
- return self.dict.__contains__(key)
- def __getitem__(self, key):
- return self.dict.__getitem__(key)
- def __setitem__(self, key, value):
- self.dict.__setitem__(key, value)
- def __delitem__(self, key):
- return self.dict.__delitem__(key)
- def clear(self):
- self.dict.clear()
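
The point of SyncDict is that ``createfunc`` runs at most once per key even under
concurrent access, since the mutex is only taken on the miss path. A sketch:

    registry = SyncDict()

    def make_resource():
        return object()           # stand-in for something expensive to build

    first = registry.get('db', make_resource)
    again = registry.get('db', make_resource)   # hit: createfunc not called again
    assert first is again
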
-
-
-class WeakValuedRegistry(SyncDict):
- def __init__(self):
- self.mutex = _threading.RLock()
- self.dict = weakref.WeakValueDictionary()
-
-sha1 = None
-def encoded_path(root, identifiers, extension=".enc", depth=3,
- digest_filenames=True):
- """Generate a unique file-accessible path from the given list of
- identifiers starting at the given root directory."""
- ident = "_".join(identifiers)
-
- global sha1
- if sha1 is None:
- from beaker.crypto import sha1
-
- if digest_filenames:
- if py3k:
- ident = sha1(ident.encode('utf-8')).hexdigest()
- else:
- ident = sha1(ident).hexdigest()
-
- ident = os.path.basename(ident)
-
- tokens = []
- for d in range(1, depth):
- tokens.append(ident[0:d])
-
- dir = os.path.join(root, *tokens)
- verify_directory(dir)
-
- return os.path.join(dir, ident + extension)
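
With the default ``depth=3``, the first one and two characters of the (optionally
SHA1-hashed) identifier become nested directories, keeping any single directory from
filling up with cache or lock files. A sketch (the root path is a placeholder; the
directories are created as a side effect):

    path = encoded_path('/tmp/beaker_data', ['mynamespace', 'mykey'],
                        extension='.lock')
    # e.g. /tmp/beaker_data/3/3f/3f785...2d.lock
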
-
-
-def verify_options(opt, types, error):
- if not isinstance(opt, types):
- if not isinstance(types, tuple):
- types = (types,)
- coerced = False
- for typ in types:
- try:
- if typ in (list, tuple):
- opt = [x.strip() for x in opt.split(',')]
- else:
- if typ == bool:
- typ = asbool
- opt = typ(opt)
- coerced = True
- except Exception:
- pass
- if coerced:
- break
- if not coerced:
- raise Exception(error)
- elif isinstance(opt, str) and not opt.strip():
- raise Exception("Empty strings are invalid for: %s" % error)
- return opt
-
-
-def verify_rules(params, ruleset):
- for key, types, message in ruleset:
- if key in params:
- params[key] = verify_options(params[key], types, message)
- return params
-
-
-def coerce_session_params(params):
- rules = [
- ('data_dir', (str, types.NoneType), "data_dir must be a string "
- "referring to a directory."),
- ('lock_dir', (str, types.NoneType), "lock_dir must be a string referring to a "
- "directory."),
- ('type', (str, types.NoneType), "Session type must be a string."),
- ('cookie_expires', (bool, datetime, timedelta), "Cookie expires was "
- "not a boolean, datetime, or timedelta instance."),
- ('cookie_domain', (str, types.NoneType), "Cookie domain must be a "
- "string."),
- ('id', (str,), "Session id must be a string."),
- ('key', (str,), "Session key must be a string."),
- ('secret', (str, types.NoneType), "Session secret must be a string."),
- ('validate_key', (str, types.NoneType), "Session validate_key must be "
- "a string."),
- ('encrypt_key', (str, types.NoneType), "Session encrypt_key must be "
- "a string."),
- ('secure', (bool, types.NoneType), "Session secure must be a boolean."),
- ('timeout', (int, types.NoneType), "Session timeout must be an "
- "integer."),
- ('auto', (bool, types.NoneType), "Session auto must be true/false "
- "if present."),
- ]
- return verify_rules(params, rules)
-
-
-def coerce_cache_params(params):
- rules = [
- ('data_dir', (str, types.NoneType), "data_dir must be a string "
- "referring to a directory."),
- ('lock_dir', (str, types.NoneType), "lock_dir must be a string referring to a "
- "directory."),
- ('type', (str,), "Cache type must be a string."),
- ('enabled', (bool, types.NoneType), "enabled must be true/false "
- "if present."),
- ('expire', (int, types.NoneType), "expire must be an integer representing "
- "how many seconds the cache is valid for"),
- ('regions', (list, tuple, types.NoneType), "Regions must be a "
- "comma seperated list of valid regions")
- ]
- return verify_rules(params, rules)
-
-
-def parse_cache_config_options(config, include_defaults=True):
- """Parse configuration options and validate for use with the
- CacheManager"""
-
- # Load default cache options
- if include_defaults:
- options = dict(type='memory', data_dir=None, expire=None,
- log_file=None)
- else:
- options = {}
- for key, val in config.iteritems():
- if key.startswith('beaker.cache.'):
- options[key[13:]] = val
- if key.startswith('cache.'):
- options[key[6:]] = val
- coerce_cache_params(options)
-
- # Set cache to enabled if not turned off
- if 'enabled' not in options:
- options['enabled'] = True
-
- # Configure region dict if regions are available
- regions = options.pop('regions', None)
- if regions:
- region_configs = {}
- for region in regions:
- # Setup the default cache options
- region_options = dict(data_dir=options.get('data_dir'),
- lock_dir=options.get('lock_dir'),
- type=options.get('type'),
- enabled=options['enabled'],
- expire=options.get('expire'))
- region_len = len(region) + 1
- for key in options.keys():
- if key.startswith('%s.' % region):
- region_options[key[region_len:]] = options.pop(key)
- coerce_cache_params(region_options)
- region_configs[region] = region_options
- options['cache_regions'] = region_configs
- return options
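
For example, a flat Paste-style config with two regions parses into per-region option
dicts (values are illustrative):

    config = {
        'cache.type': 'memory',
        'cache.regions': 'short, long',
        'cache.short.expire': '60',
        'cache.long.expire': '3600',
    }
    options = parse_cache_config_options(config)
    # options['cache_regions']['short']['expire'] == 60
    # options['cache_regions']['long']['expire'] == 3600
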
-
-def func_namespace(func):
- """Generates a unique namespace for a function"""
- kls = None
- if hasattr(func, 'im_func'):
- kls = func.im_class
- func = func.im_func
-
- if kls:
- return '%s.%s' % (kls.__module__, kls.__name__)
- else:
- return '%s.%s' % (func.__module__, func.__name__)
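
A sketch of the two cases (bound method versus plain function; run as a script the
module is '__main__'):

    class Service(object):
        def fetch(self):
            pass

    def helper():
        pass

    func_namespace(Service().fetch)   # '__main__.Service', a class-level namespace
    func_namespace(helper)            # '__main__.helper'
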