Diffstat (limited to 'module/lib/beaker/ext')
-rw-r--r--   module/lib/beaker/ext/database.py   |  55
-rw-r--r--   module/lib/beaker/ext/google.py     |  29
-rw-r--r--   module/lib/beaker/ext/memcached.py  | 189
-rw-r--r--   module/lib/beaker/ext/sqla.py       |  15
4 files changed, 211 insertions, 77 deletions
diff --git a/module/lib/beaker/ext/database.py b/module/lib/beaker/ext/database.py
index 701e6f7d2..462fb8de4 100644
--- a/module/lib/beaker/ext/database.py
+++ b/module/lib/beaker/ext/database.py
@@ -14,6 +14,7 @@ sa = None
pool = None
types = None
+
class DatabaseNamespaceManager(OpenResourceNamespaceManager):
metadatas = SyncDict()
tables = SyncDict()
@@ -30,12 +31,12 @@ class DatabaseNamespaceManager(OpenResourceNamespaceManager):
except ImportError:
raise InvalidCacheBackendError("Database cache backend requires "
"the 'sqlalchemy' library")
-
+
def __init__(self, namespace, url=None, sa_opts=None, optimistic=False,
table_name='beaker_cache', data_dir=None, lock_dir=None,
- **params):
+ schema_name=None, **params):
"""Creates a database namespace manager
-
+
``url``
SQLAlchemy compliant db url
``sa_opts``
@@ -47,9 +48,11 @@ class DatabaseNamespaceManager(OpenResourceNamespaceManager):
numbers.
``table_name``
The table name to use in the database for the cache.
+ ``schema_name``
+ The schema name to use in the database for the cache.
"""
OpenResourceNamespaceManager.__init__(self, namespace)
-
+
if sa_opts is None:
sa_opts = params
@@ -58,14 +61,16 @@ class DatabaseNamespaceManager(OpenResourceNamespaceManager):
elif data_dir:
self.lock_dir = data_dir + "/container_db_lock"
if self.lock_dir:
- verify_directory(self.lock_dir)
-
+ verify_directory(self.lock_dir)
+
# Check to see if the table's been created before
url = url or sa_opts['sa.url']
table_key = url + table_name
+
def make_cache():
# Check to see if we have a connection pool open already
meta_key = url + table_name
+
def make_meta():
# SQLAlchemy pops the url, this ensures it sticks around
# later
@@ -82,7 +87,8 @@ class DatabaseNamespaceManager(OpenResourceNamespaceManager):
sa.Column('accessed', types.DateTime, nullable=False),
sa.Column('created', types.DateTime, nullable=False),
sa.Column('data', types.PickleType, nullable=False),
- sa.UniqueConstraint('namespace')
+ sa.UniqueConstraint('namespace'),
+ schema=schema_name if schema_name else meta.schema
)
cache.create(checkfirst=True)
return cache
@@ -90,24 +96,26 @@ class DatabaseNamespaceManager(OpenResourceNamespaceManager):
self._is_new = False
self.loaded = False
self.cache = DatabaseNamespaceManager.tables.get(table_key, make_cache)
-
+
def get_access_lock(self):
return null_synchronizer()
def get_creation_lock(self, key):
return file_synchronizer(
- identifier ="databasecontainer/funclock/%s" % self.namespace,
- lock_dir = self.lock_dir)
+ identifier="databasecontainer/funclock/%s/%s" % (
+ self.namespace, key
+ ),
+ lock_dir=self.lock_dir)
- def do_open(self, flags):
+ def do_open(self, flags, replace):
# If we already loaded the data, don't bother loading it again
if self.loaded:
self.flags = flags
return
-
+
cache = self.cache
- result = sa.select([cache.c.data],
- cache.c.namespace==self.namespace
+ result = sa.select([cache.c.data],
+ cache.c.namespace == self.namespace
).execute().fetchone()
if not result:
self._is_new = True
@@ -123,7 +131,7 @@ class DatabaseNamespaceManager(OpenResourceNamespaceManager):
self._is_new = True
self.flags = flags
self.loaded = True
-
+
def do_close(self):
if self.flags is not None and (self.flags == 'c' or self.flags == 'w'):
cache = self.cache
@@ -133,25 +141,25 @@ class DatabaseNamespaceManager(OpenResourceNamespaceManager):
created=datetime.now())
self._is_new = False
else:
- cache.update(cache.c.namespace==self.namespace).execute(
+ cache.update(cache.c.namespace == self.namespace).execute(
data=self.hash, accessed=datetime.now())
self.flags = None
-
+
def do_remove(self):
cache = self.cache
- cache.delete(cache.c.namespace==self.namespace).execute()
+ cache.delete(cache.c.namespace == self.namespace).execute()
self.hash = {}
-
+
# We can retain the fact that we did a load attempt, but since the
# file is gone this will be a new namespace should it be saved.
self._is_new = True
- def __getitem__(self, key):
+ def __getitem__(self, key):
return self.hash[key]
- def __contains__(self, key):
- return self.hash.has_key(key)
-
+ def __contains__(self, key):
+ return key in self.hash
+
def __setitem__(self, key, value):
self.hash[key] = value
@@ -161,5 +169,6 @@ class DatabaseNamespaceManager(OpenResourceNamespaceManager):
def keys(self):
return self.hash.keys()
+
class DatabaseContainer(Container):
namespace_manager = DatabaseNamespaceManager
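
Usage note (illustrative, not part of the diff): the new ``schema_name`` keyword lets the cache table live in an explicit database schema, and the creation lock is now scoped per cache key instead of per namespace. A minimal sketch of direct use, with placeholder connection details and a 'caching' schema that is assumed to already exist:

    # Hypothetical sketch of the new schema_name option on the database backend;
    # the URL and directories below are placeholders.
    from beaker.ext.database import DatabaseNamespaceManager

    nsm = DatabaseNamespaceManager(
        'myapp.sessions',                                  # namespace
        url='postgresql://scott:tiger@localhost/appdb',    # SQLAlchemy-compliant URL
        table_name='beaker_cache',
        schema_name='caching',     # new keyword; falls back to MetaData.schema when omitted
        data_dir='/tmp/beaker')

In typical deployments the same keyword would reach the backend through the regular cache configuration (e.g. a ``cache.schema_name`` option), but that mapping is an assumption worth verifying against your Beaker version.
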
diff --git a/module/lib/beaker/ext/google.py b/module/lib/beaker/ext/google.py
index dd8380d7f..d0a6205f4 100644
--- a/module/lib/beaker/ext/google.py
+++ b/module/lib/beaker/ext/google.py
@@ -10,6 +10,7 @@ log = logging.getLogger(__name__)
db = None
+
class GoogleNamespaceManager(OpenResourceNamespaceManager):
tables = {}
@@ -23,11 +24,11 @@ class GoogleNamespaceManager(OpenResourceNamespaceManager):
except ImportError:
raise InvalidCacheBackendError("Datastore cache backend requires the "
"'google.appengine.ext' library")
-
+
def __init__(self, namespace, table_name='beaker_cache', **params):
"""Creates a datastore namespace manager"""
OpenResourceNamespaceManager.__init__(self, namespace)
-
+
def make_cache():
table_dict = dict(created=db.DateTimeProperty(),
accessed=db.DateTimeProperty(),
@@ -40,11 +41,11 @@ class GoogleNamespaceManager(OpenResourceNamespaceManager):
self._is_new = False
self.loaded = False
self.log_debug = logging.DEBUG >= log.getEffectiveLevel()
-
+
# Google wants namespaces to start with letters, change the namespace
# to start with a letter
self.namespace = 'p%s' % self.namespace
-
+
def get_access_lock(self):
return null_synchronizer()
@@ -52,14 +53,14 @@ class GoogleNamespaceManager(OpenResourceNamespaceManager):
# this is weird, should probably be present
return null_synchronizer()
- def do_open(self, flags):
+ def do_open(self, flags, replace):
# If we already loaded the data, don't bother loading it again
if self.loaded:
self.flags = flags
return
-
+
item = self.cache.get_by_key_name(self.namespace)
-
+
if not item:
self._is_new = True
self.hash = {}
@@ -74,7 +75,7 @@ class GoogleNamespaceManager(OpenResourceNamespaceManager):
self._is_new = True
self.flags = flags
self.loaded = True
-
+
def do_close(self):
if self.flags is not None and (self.flags == 'c' or self.flags == 'w'):
if self._is_new:
@@ -90,12 +91,12 @@ class GoogleNamespaceManager(OpenResourceNamespaceManager):
item.accessed = datetime.now()
item.put()
self.flags = None
-
+
def do_remove(self):
item = self.cache.get_by_key_name(self.namespace)
item.delete()
self.hash = {}
-
+
# We can retain the fact that we did a load attempt, but since the
# file is gone this will be a new namespace should it be saved.
self._is_new = True
@@ -103,9 +104,9 @@ class GoogleNamespaceManager(OpenResourceNamespaceManager):
def __getitem__(self, key):
return self.hash[key]
- def __contains__(self, key):
- return self.hash.has_key(key)
-
+ def __contains__(self, key):
+ return key in self.hash
+
def __setitem__(self, key, value):
self.hash[key] = value
@@ -114,7 +115,7 @@ class GoogleNamespaceManager(OpenResourceNamespaceManager):
def keys(self):
return self.hash.keys()
-
+
class GoogleContainer(Container):
namespace_class = GoogleNamespaceManager
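
As in the database and sqla backends, ``do_open()`` now takes a second ``replace`` argument from the ``OpenResourceNamespaceManager`` contract (presumably signalling that the existing contents are about to be overwritten, so loading can be skipped). Any third-party manager built on the same base class needs the extra parameter too; the subclass below is a purely hypothetical sketch to show the updated signature, not part of Beaker or of this change:

    # Hypothetical in-memory manager, only to illustrate the do_open(flags, replace)
    # signature that the base class now calls.
    from beaker.container import OpenResourceNamespaceManager
    from beaker.synchronization import null_synchronizer

    class InMemoryNamespaceManager(OpenResourceNamespaceManager):
        def __init__(self, namespace, **params):
            OpenResourceNamespaceManager.__init__(self, namespace)
            self.hash = {}

        def get_access_lock(self):
            return null_synchronizer()

        def get_creation_lock(self, key):
            return null_synchronizer()

        def do_open(self, flags, replace):
            # `replace` is the new argument; when set, existing data need not be loaded.
            self.flags = flags

        def do_close(self):
            self.flags = None

        def do_remove(self):
            self.hash = {}

        def __getitem__(self, key):
            return self.hash[key]

        def __contains__(self, key):
            return key in self.hash

        def __setitem__(self, key, value):
            self.hash[key] = value

        def __delitem__(self, key):
            del self.hash[key]

        def keys(self):
            return self.hash.keys()
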
diff --git a/module/lib/beaker/ext/memcached.py b/module/lib/beaker/ext/memcached.py
index 96516953f..94e3da3c9 100644
--- a/module/lib/beaker/ext/memcached.py
+++ b/module/lib/beaker/ext/memcached.py
@@ -1,54 +1,118 @@
+from __future__ import with_statement
from beaker.container import NamespaceManager, Container
+from beaker.crypto.util import sha1
from beaker.exceptions import InvalidCacheBackendError, MissingCacheParameter
-from beaker.synchronization import file_synchronizer, null_synchronizer
-from beaker.util import verify_directory, SyncDict
+from beaker.synchronization import file_synchronizer
+from beaker.util import verify_directory, SyncDict, parse_memcached_behaviors
import warnings
-memcache = None
+MAX_KEY_LENGTH = 250
-class MemcachedNamespaceManager(NamespaceManager):
- clients = SyncDict()
-
- @classmethod
- def _init_dependencies(cls):
+_client_libs = {}
+
+
+def _load_client(name='auto'):
+ if name in _client_libs:
+ return _client_libs[name]
+
+ def _pylibmc():
+ global pylibmc
+ import pylibmc
+ return pylibmc
+
+ def _cmemcache():
+ global cmemcache
+ import cmemcache
+ warnings.warn("cmemcache is known to have serious "
+ "concurrency issues; consider using 'memcache' "
+ "or 'pylibmc'")
+ return cmemcache
+
+ def _memcache():
global memcache
- if memcache is not None:
- return
- try:
- import pylibmc as memcache
- except ImportError:
+ import memcache
+ return memcache
+
+ def _auto():
+ for _client in (_pylibmc, _cmemcache, _memcache):
try:
- import cmemcache as memcache
- warnings.warn("cmemcache is known to have serious "
- "concurrency issues; consider using 'memcache' or 'pylibmc'")
+ return _client()
except ImportError:
- try:
- import memcache
- except ImportError:
- raise InvalidCacheBackendError("Memcached cache backend requires either "
- "the 'memcache' or 'cmemcache' library")
-
- def __init__(self, namespace, url=None, data_dir=None, lock_dir=None, **params):
+ pass
+ else:
+ raise InvalidCacheBackendError(
+ "Memcached cache backend requires one "
+ "of: 'pylibmc' or 'memcache' to be installed.")
+
+ clients = {
+ 'pylibmc': _pylibmc,
+ 'cmemcache': _cmemcache,
+ 'memcache': _memcache,
+ 'auto': _auto
+ }
+ _client_libs[name] = clib = clients[name]()
+ return clib
+
+
+def _is_configured_for_pylibmc(memcache_module_config, memcache_client):
+ return memcache_module_config == 'pylibmc' or \
+ memcache_client.__name__.startswith('pylibmc')
+
+
+class MemcachedNamespaceManager(NamespaceManager):
+ """Provides the :class:`.NamespaceManager` API over a memcache client library."""
+
+ clients = SyncDict()
+
+ def __new__(cls, *args, **kw):
+ memcache_module = kw.pop('memcache_module', 'auto')
+
+ memcache_client = _load_client(memcache_module)
+
+ if _is_configured_for_pylibmc(memcache_module, memcache_client):
+ return object.__new__(PyLibMCNamespaceManager)
+ else:
+ return object.__new__(MemcachedNamespaceManager)
+
+ def __init__(self, namespace, url,
+ memcache_module='auto',
+ data_dir=None, lock_dir=None,
+ **kw):
NamespaceManager.__init__(self, namespace)
-
+
+ _memcache_module = _client_libs[memcache_module]
+
if not url:
- raise MissingCacheParameter("url is required")
-
+ raise MissingCacheParameter("url is required")
+
if lock_dir:
self.lock_dir = lock_dir
elif data_dir:
self.lock_dir = data_dir + "/container_mcd_lock"
if self.lock_dir:
- verify_directory(self.lock_dir)
-
- self.mc = MemcachedNamespaceManager.clients.get(url, memcache.Client, url.split(';'))
+ verify_directory(self.lock_dir)
+
+ # Check for pylibmc namespace manager, in which case client will be
+ # instantiated by subclass __init__, to handle behavior passing to the
+ # pylibmc client
+ if not _is_configured_for_pylibmc(memcache_module, _memcache_module):
+ self.mc = MemcachedNamespaceManager.clients.get(
+ (memcache_module, url),
+ _memcache_module.Client,
+ url.split(';'))
def get_creation_lock(self, key):
return file_synchronizer(
- identifier="memcachedcontainer/funclock/%s" % self.namespace,lock_dir = self.lock_dir)
+ identifier="memcachedcontainer/funclock/%s/%s" %
+ (self.namespace, key), lock_dir=self.lock_dir)
def _format_key(self, key):
- return self.namespace + '_' + key.replace(' ', '\302\267')
+ if not isinstance(key, str):
+ key = key.decode('ascii')
+ formated_key = (self.namespace + '_' + key).replace(' ', '\302\267')
+ if len(formated_key) > MAX_KEY_LENGTH:
+ formated_key = sha1(formated_key).hexdigest()
+ return formated_key
def __getitem__(self, key):
return self.mc.get(self._format_key(key))
@@ -68,15 +132,72 @@ class MemcachedNamespaceManager(NamespaceManager):
def __setitem__(self, key, value):
self.set_value(key, value)
-
+
def __delitem__(self, key):
self.mc.delete(self._format_key(key))
def do_remove(self):
self.mc.flush_all()
-
+
def keys(self):
- raise NotImplementedError("Memcache caching does not support iteration of all cache keys")
+ raise NotImplementedError(
+ "Memcache caching does not "
+ "support iteration of all cache keys")
+
+
+class PyLibMCNamespaceManager(MemcachedNamespaceManager):
+ """Provide thread-local support for pylibmc."""
+
+ def __init__(self, *arg, **kw):
+ super(PyLibMCNamespaceManager, self).__init__(*arg, **kw)
+
+ memcache_module = kw.get('memcache_module', 'auto')
+ _memcache_module = _client_libs[memcache_module]
+ protocol = kw.get('protocol', 'text')
+ username = kw.get('username', None)
+ password = kw.get('password', None)
+ url = kw.get('url')
+ behaviors = parse_memcached_behaviors(kw)
+
+ self.mc = MemcachedNamespaceManager.clients.get(
+ (memcache_module, url),
+ _memcache_module.Client,
+ servers=url.split(';'), behaviors=behaviors,
+ binary=(protocol == 'binary'), username=username,
+ password=password)
+ self.pool = pylibmc.ThreadMappedPool(self.mc)
+
+ def __getitem__(self, key):
+ with self.pool.reserve() as mc:
+ return mc.get(self._format_key(key))
+
+ def __contains__(self, key):
+ with self.pool.reserve() as mc:
+ value = mc.get(self._format_key(key))
+ return value is not None
+
+ def has_key(self, key):
+ return key in self
+
+ def set_value(self, key, value, expiretime=None):
+ with self.pool.reserve() as mc:
+ if expiretime:
+ mc.set(self._format_key(key), value, time=expiretime)
+ else:
+ mc.set(self._format_key(key), value)
+
+ def __setitem__(self, key, value):
+ self.set_value(key, value)
+
+ def __delitem__(self, key):
+ with self.pool.reserve() as mc:
+ mc.delete(self._format_key(key))
+
+ def do_remove(self):
+ with self.pool.reserve() as mc:
+ mc.flush_all()
+
class MemcachedContainer(Container):
+ """Container class which invokes :class:`.MemcacheNamespaceManager`."""
namespace_class = MemcachedNamespaceManager
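
Illustrative usage of the reworked backend (not part of the diff): the new ``memcache_module`` option selects the client library ('pylibmc', 'memcache', 'cmemcache', or 'auto'), and ``__new__`` transparently returns a ``PyLibMCNamespaceManager`` when pylibmc is chosen or auto-detected. The server addresses, credentials and directories below are placeholders and assume the relevant client library and a reachable memcached instance:

    # Hypothetical sketch of the new client-selection options.
    from beaker.ext.memcached import MemcachedNamespaceManager

    # Force the pure-python 'memcache' client instead of auto-detection.
    nsm = MemcachedNamespaceManager(
        'myapp.cache',
        url='127.0.0.1:11211;127.0.0.1:11212',   # ';'-separated server list
        memcache_module='memcache',
        lock_dir='/tmp/beaker_locks')

    # With memcache_module='pylibmc' (or when pylibmc is auto-detected), the same
    # constructor yields a PyLibMCNamespaceManager, which pools clients per thread
    # and accepts binary-protocol/SASL options.
    nsm_pylibmc = MemcachedNamespaceManager(
        'myapp.cache',
        url='127.0.0.1:11211',
        memcache_module='pylibmc',
        protocol='binary',
        username='user', password='secret',      # needs a SASL-enabled pylibmc build
        lock_dir='/tmp/beaker_locks')

    # Keys whose formatted form exceeds MAX_KEY_LENGTH (250) are replaced by their
    # SHA-1 hex digest before being sent to memcached.
    nsm['a' * 300] = 'cached value'
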
diff --git a/module/lib/beaker/ext/sqla.py b/module/lib/beaker/ext/sqla.py
index 8c79633c1..6405c2919 100644
--- a/module/lib/beaker/ext/sqla.py
+++ b/module/lib/beaker/ext/sqla.py
@@ -13,6 +13,7 @@ log = logging.getLogger(__name__)
sa = None
+
class SqlaNamespaceManager(OpenResourceNamespaceManager):
binds = SyncDict()
tables = SyncDict()
@@ -47,7 +48,7 @@ class SqlaNamespaceManager(OpenResourceNamespaceManager):
elif data_dir:
self.lock_dir = data_dir + "/container_db_lock"
if self.lock_dir:
- verify_directory(self.lock_dir)
+ verify_directory(self.lock_dir)
self.bind = self.__class__.binds.get(str(bind.url), lambda: bind)
self.table = self.__class__.tables.get('%s:%s' % (bind.url, table.name),
@@ -61,10 +62,10 @@ class SqlaNamespaceManager(OpenResourceNamespaceManager):
def get_creation_lock(self, key):
return file_synchronizer(
- identifier ="databasecontainer/funclock/%s" % self.namespace,
+ identifier="databasecontainer/funclock/%s" % self.namespace,
lock_dir=self.lock_dir)
- def do_open(self, flags):
+ def do_open(self, flags, replace):
if self.loaded:
self.flags = flags
return
@@ -108,7 +109,7 @@ class SqlaNamespaceManager(OpenResourceNamespaceManager):
return self.hash[key]
def __contains__(self, key):
- return self.hash.has_key(key)
+ return key in self.hash
def __setitem__(self, key, value):
self.hash[key] = value
@@ -123,11 +124,13 @@ class SqlaNamespaceManager(OpenResourceNamespaceManager):
class SqlaContainer(Container):
namespace_manager = SqlaNamespaceManager
-def make_cache_table(metadata, table_name='beaker_cache'):
+
+def make_cache_table(metadata, table_name='beaker_cache', schema_name=None):
"""Return a ``Table`` object suitable for storing cached values for the
namespace manager. Do not create the table."""
return sa.Table(table_name, metadata,
sa.Column('namespace', sa.String(255), primary_key=True),
sa.Column('accessed', sa.DateTime, nullable=False),
sa.Column('created', sa.DateTime, nullable=False),
- sa.Column('data', sa.PickleType, nullable=False))
+ sa.Column('data', sa.PickleType, nullable=False),
+ schema=schema_name if schema_name else metadata.schema)
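
Companion sketch for the ``make_cache_table()`` change (illustrative only; the engine URL is a placeholder and the 'caching' schema must already exist): the new ``schema_name`` argument overrides ``metadata.schema`` when placing the cache table.

    # Hedged example of building the cache table in an explicit schema and handing
    # it to SqlaNamespaceManager.
    import sqlalchemy as sa
    from beaker.ext.sqla import SqlaNamespaceManager, make_cache_table

    # make_cache_table() uses the module-level `sa` handle, which is populated
    # lazily; when using the module directly, trigger the dependency hook first.
    SqlaNamespaceManager._init_dependencies()

    engine = sa.create_engine('postgresql://scott:tiger@localhost/appdb')
    metadata = sa.MetaData()
    cache_table = make_cache_table(metadata, table_name='beaker_cache',
                                   schema_name='caching')   # new argument
    metadata.create_all(engine)   # make_cache_table() itself does not create the table

    manager = SqlaNamespaceManager('myapp.sessions', bind=engine, table=cache_table,
                                   data_dir='/tmp/beaker')
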