summaryrefslogtreecommitdiffstats
path: root/pyload/lib/beaker/ext
diff options
context:
space:
mode:
Diffstat (limited to 'pyload/lib/beaker/ext')
-rw-r--r--pyload/lib/beaker/ext/__init__.py0
-rw-r--r--pyload/lib/beaker/ext/database.py174
-rw-r--r--pyload/lib/beaker/ext/google.py121
-rw-r--r--pyload/lib/beaker/ext/memcached.py203
-rw-r--r--pyload/lib/beaker/ext/sqla.py136
5 files changed, 634 insertions, 0 deletions
diff --git a/pyload/lib/beaker/ext/__init__.py b/pyload/lib/beaker/ext/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/pyload/lib/beaker/ext/__init__.py
diff --git a/pyload/lib/beaker/ext/database.py b/pyload/lib/beaker/ext/database.py
new file mode 100644
index 000000000..462fb8de4
--- /dev/null
+++ b/pyload/lib/beaker/ext/database.py
@@ -0,0 +1,174 @@
+import cPickle
+import logging
+import pickle
+from datetime import datetime
+
+from beaker.container import OpenResourceNamespaceManager, Container
+from beaker.exceptions import InvalidCacheBackendError, MissingCacheParameter
+from beaker.synchronization import file_synchronizer, null_synchronizer
+from beaker.util import verify_directory, SyncDict
+
+log = logging.getLogger(__name__)
+
# SQLAlchemy modules, imported lazily by _init_dependencies() so this
# module can be imported even when sqlalchemy is not installed.
sa = None
pool = None
types = None
+
+
class DatabaseNamespaceManager(OpenResourceNamespaceManager):
    """Persist cache namespaces in a SQLAlchemy-managed database table.

    Each namespace is a single row; the namespace's key/value dict is
    stored pickled in the ``data`` column (via PickleType), so opening
    loads the whole namespace and closing rewrites the row.
    """

    # Process-wide registries shared by every instance: one MetaData per
    # engine URL, one Table per url + table_name key.
    metadatas = SyncDict()
    tables = SyncDict()

    @classmethod
    def _init_dependencies(cls):
        """Lazily import SQLAlchemy so importing this module does not
        require the library; raises InvalidCacheBackendError if absent."""
        global sa, pool, types
        if sa is not None:
            return
        try:
            import sqlalchemy as sa
            import sqlalchemy.pool as pool
            from sqlalchemy import types
        except ImportError:
            raise InvalidCacheBackendError("Database cache backend requires "
                                           "the 'sqlalchemy' library")

    def __init__(self, namespace, url=None, sa_opts=None, optimistic=False,
                 table_name='beaker_cache', data_dir=None, lock_dir=None,
                 schema_name=None, **params):
        """Creates a database namespace manager

        ``url``
            SQLAlchemy compliant db url
        ``sa_opts``
            A dictionary of SQLAlchemy keyword options to initialize the engine
            with.
        ``optimistic``
            Use optimistic session locking, note that this will result in an
            additional select when updating a cache value to compare version
            numbers.
        ``table_name``
            The table name to use in the database for the cache.
        ``schema_name``
            The schema name to use in the database for the cache.
        """
        # NOTE(review): ``optimistic`` is accepted but never referenced in
        # this implementation.
        OpenResourceNamespaceManager.__init__(self, namespace)

        if sa_opts is None:
            sa_opts = params

        if lock_dir:
            self.lock_dir = lock_dir
        elif data_dir:
            self.lock_dir = data_dir + "/container_db_lock"
        if self.lock_dir:
            verify_directory(self.lock_dir)

        # Check to see if the table's been created before
        url = url or sa_opts['sa.url']
        table_key = url + table_name

        def make_cache():
            # Check to see if we have a connection pool open already
            meta_key = url + table_name

            def make_meta():
                # SQLAlchemy pops the url, this ensures it sticks around
                # later
                sa_opts['sa.url'] = url
                engine = sa.engine_from_config(sa_opts, 'sa.')
                meta = sa.MetaData()
                meta.bind = engine
                return meta
            meta = DatabaseNamespaceManager.metadatas.get(meta_key, make_meta)
            # Create the table object and cache it now
            cache = sa.Table(table_name, meta,
                             sa.Column('id', types.Integer, primary_key=True),
                             sa.Column('namespace', types.String(255),
                                       nullable=False),
                             sa.Column('accessed', types.DateTime,
                                       nullable=False),
                             sa.Column('created', types.DateTime,
                                       nullable=False),
                             sa.Column('data', types.PickleType,
                                       nullable=False),
                             sa.UniqueConstraint('namespace'),
                             schema=schema_name if schema_name else meta.schema)
            cache.create(checkfirst=True)
            return cache
        self.hash = {}
        self._is_new = False
        self.loaded = False
        self.cache = DatabaseNamespaceManager.tables.get(table_key, make_cache)

    def get_access_lock(self):
        # Row-level concurrency is left to the database itself.
        return null_synchronizer()

    def get_creation_lock(self, key):
        """Return a file-based lock scoped to (namespace, key) guarding
        value creation."""
        return file_synchronizer(
            identifier="databasecontainer/funclock/%s/%s" % (
                self.namespace, key
            ),
            lock_dir=self.lock_dir)

    def do_open(self, flags, replace):
        """Load this namespace's row into ``self.hash`` (only once)."""
        # If we already loaded the data, don't bother loading it again
        if self.loaded:
            self.flags = flags
            return

        cache = self.cache
        result = sa.select([cache.c.data],
                           cache.c.namespace == self.namespace
                           ).execute().fetchone()
        if not result:
            self._is_new = True
            self.hash = {}
        else:
            self._is_new = False
            try:
                # PickleType has already unpickled the column value.
                self.hash = result['data']
            except (IOError, OSError, EOFError, cPickle.PickleError,
                    pickle.PickleError):
                log.debug("Couldn't load pickle data, creating new storage")
                self.hash = {}
                self._is_new = True
        self.flags = flags
        self.loaded = True

    def do_close(self):
        """Write ``self.hash`` back to the row if opened writable ('c'/'w')."""
        if self.flags is not None and (self.flags == 'c' or self.flags == 'w'):
            cache = self.cache
            if self._is_new:
                cache.insert().execute(namespace=self.namespace, data=self.hash,
                                       accessed=datetime.now(),
                                       created=datetime.now())
                self._is_new = False
            else:
                cache.update(cache.c.namespace == self.namespace).execute(
                    data=self.hash, accessed=datetime.now())
        self.flags = None

    def do_remove(self):
        """Delete this namespace's row and reset in-memory state."""
        cache = self.cache
        cache.delete(cache.c.namespace == self.namespace).execute()
        self.hash = {}

        # We can retain the fact that we did a load attempt, but since the
        # file is gone this will be a new namespace should it be saved.
        self._is_new = True

    def __getitem__(self, key):
        return self.hash[key]

    def __contains__(self, key):
        return key in self.hash

    def __setitem__(self, key, value):
        self.hash[key] = value

    def __delitem__(self, key):
        del self.hash[key]

    def keys(self):
        return self.hash.keys()
+
+
class DatabaseContainer(Container):
    """Container using DatabaseNamespaceManager for storage.

    NOTE(review): the sibling google/memcached backends assign
    ``namespace_class`` instead of ``namespace_manager`` — confirm which
    attribute ``Container`` actually reads.
    """
    namespace_manager = DatabaseNamespaceManager
diff --git a/pyload/lib/beaker/ext/google.py b/pyload/lib/beaker/ext/google.py
new file mode 100644
index 000000000..d0a6205f4
--- /dev/null
+++ b/pyload/lib/beaker/ext/google.py
@@ -0,0 +1,121 @@
+import cPickle
+import logging
+from datetime import datetime
+
+from beaker.container import OpenResourceNamespaceManager, Container
+from beaker.exceptions import InvalidCacheBackendError
+from beaker.synchronization import null_synchronizer
+
+log = logging.getLogger(__name__)
+
# App Engine datastore module, imported lazily by _init_dependencies()
# so this module can be imported without the GAE SDK installed.
db = None
+
+
class GoogleNamespaceManager(OpenResourceNamespaceManager):
    """Persist cache namespaces in the Google App Engine datastore.

    Each namespace is one datastore entity (keyed by namespace name)
    whose ``data`` blob holds the pickled key/value dict.
    """

    # Dynamically built db.Model classes, keyed by table name, shared by
    # all instances in this process.
    tables = {}

    @classmethod
    def _init_dependencies(cls):
        """Lazily import the App Engine datastore API so importing this
        module does not require the GAE SDK."""
        global db
        if db is not None:
            return
        try:
            db = __import__('google.appengine.ext.db').appengine.ext.db
        except ImportError:
            raise InvalidCacheBackendError("Datastore cache backend requires the "
                                           "'google.appengine.ext' library")

    def __init__(self, namespace, table_name='beaker_cache', **params):
        """Creates a datastore namespace manager"""
        OpenResourceNamespaceManager.__init__(self, namespace)

        def make_cache():
            # Build a db.Model subclass on the fly to act as the "table".
            table_dict = dict(created=db.DateTimeProperty(),
                              accessed=db.DateTimeProperty(),
                              data=db.BlobProperty())
            table = type(table_name, (db.Model,), table_dict)
            return table
        self.table_name = table_name
        # Build the model class only the first time this table name is
        # seen; the previous ``tables.setdefault(table_name, make_cache())``
        # constructed (and immediately discarded) a fresh model class on
        # every instantiation.  setdefault is kept for the store so a
        # concurrent first writer is never overwritten.
        if table_name not in GoogleNamespaceManager.tables:
            GoogleNamespaceManager.tables.setdefault(table_name, make_cache())
        self.cache = GoogleNamespaceManager.tables[table_name]
        self.hash = {}
        self._is_new = False
        self.loaded = False
        self.log_debug = logging.DEBUG >= log.getEffectiveLevel()

        # Google wants namespaces to start with letters, change the namespace
        # to start with a letter
        self.namespace = 'p%s' % self.namespace

    def get_access_lock(self):
        # No cross-process access locking; datastore handles concurrency.
        return null_synchronizer()

    def get_creation_lock(self, key):
        # this is weird, should probably be present
        return null_synchronizer()

    def do_open(self, flags, replace):
        """Load the namespace entity into ``self.hash`` (only once)."""
        # If we already loaded the data, don't bother loading it again
        if self.loaded:
            self.flags = flags
            return

        item = self.cache.get_by_key_name(self.namespace)

        if not item:
            self._is_new = True
            self.hash = {}
        else:
            self._is_new = False
            try:
                self.hash = cPickle.loads(str(item.data))
            except (IOError, OSError, EOFError, cPickle.PickleError):
                if self.log_debug:
                    log.debug("Couldn't load pickle data, creating new storage")
                self.hash = {}
                self._is_new = True
        self.flags = flags
        self.loaded = True

    def do_close(self):
        """Write ``self.hash`` back to the datastore if opened writable."""
        if self.flags is not None and (self.flags == 'c' or self.flags == 'w'):
            if self._is_new:
                item = self.cache(key_name=self.namespace)
                item.data = cPickle.dumps(self.hash)
                item.created = datetime.now()
                item.accessed = datetime.now()
                item.put()
                self._is_new = False
            else:
                item = self.cache.get_by_key_name(self.namespace)
                item.data = cPickle.dumps(self.hash)
                item.accessed = datetime.now()
                item.put()
        self.flags = None

    def do_remove(self):
        """Delete the namespace entity and reset in-memory state."""
        item = self.cache.get_by_key_name(self.namespace)
        item.delete()
        self.hash = {}

        # We can retain the fact that we did a load attempt, but since the
        # file is gone this will be a new namespace should it be saved.
        self._is_new = True

    def __getitem__(self, key):
        return self.hash[key]

    def __contains__(self, key):
        return key in self.hash

    def __setitem__(self, key, value):
        self.hash[key] = value

    def __delitem__(self, key):
        del self.hash[key]

    def keys(self):
        return self.hash.keys()
+
+
class GoogleContainer(Container):
    """Container wired to the datastore-backed GoogleNamespaceManager."""
    namespace_class = GoogleNamespaceManager
diff --git a/pyload/lib/beaker/ext/memcached.py b/pyload/lib/beaker/ext/memcached.py
new file mode 100644
index 000000000..94e3da3c9
--- /dev/null
+++ b/pyload/lib/beaker/ext/memcached.py
@@ -0,0 +1,203 @@
+from __future__ import with_statement
+from beaker.container import NamespaceManager, Container
+from beaker.crypto.util import sha1
+from beaker.exceptions import InvalidCacheBackendError, MissingCacheParameter
+from beaker.synchronization import file_synchronizer
+from beaker.util import verify_directory, SyncDict, parse_memcached_behaviors
+import warnings
+
# memcached rejects keys longer than 250 bytes; _format_key hashes
# over-long keys down to a SHA-1 hex digest.
MAX_KEY_LENGTH = 250

# Cache of already-imported client libraries, keyed by the requested
# module name ('pylibmc', 'cmemcache', 'memcache' or 'auto').
_client_libs = {}
+
+
def _load_client(name='auto'):
    """Import and return the memcached client library called *name*.

    ``name`` is one of 'pylibmc', 'cmemcache', 'memcache' or 'auto';
    'auto' tries each library in that order of preference.  Results are
    memoized in the module-level ``_client_libs`` dict, and each imported
    module is also published as a module global so other code in this
    file can reference it by name.
    """
    try:
        # Already imported once — hand back the cached module.
        return _client_libs[name]
    except KeyError:
        pass

    def _pylibmc():
        global pylibmc
        import pylibmc
        return pylibmc

    def _cmemcache():
        global cmemcache
        import cmemcache
        warnings.warn("cmemcache is known to have serious "
                      "concurrency issues; consider using 'memcache' "
                      "or 'pylibmc'")
        return cmemcache

    def _memcache():
        global memcache
        import memcache
        return memcache

    def _auto():
        # Preference order: pylibmc, then cmemcache, then memcache.
        for loader in (_pylibmc, _cmemcache, _memcache):
            try:
                return loader()
            except ImportError:
                continue
        raise InvalidCacheBackendError(
            "Memcached cache backend requires one "
            "of: 'pylibmc' or 'memcache' to be installed.")

    loaders = {
        'pylibmc': _pylibmc,
        'cmemcache': _cmemcache,
        'memcache': _memcache,
        'auto': _auto,
    }
    library = loaders[name]()
    _client_libs[name] = library
    return library
+
+
def _is_configured_for_pylibmc(memcache_module_config, memcache_client):
    """Return True when the pylibmc-specific namespace manager should be
    used: either pylibmc was requested explicitly, or the resolved client
    module is pylibmc (or one of its submodules)."""
    if memcache_module_config == 'pylibmc':
        return True
    return memcache_client.__name__.startswith('pylibmc')
+
+
class MemcachedNamespaceManager(NamespaceManager):
    """Provides the :class:`.NamespaceManager` API over a memcache client library."""

    # Process-wide cache of client instances, keyed by (module name, url).
    clients = SyncDict()

    def __new__(cls, *args, **kw):
        # Dispatch on the configured client library: pylibmc gets a
        # dedicated subclass that manages a thread-mapped pool.
        memcache_module = kw.pop('memcache_module', 'auto')

        memcache_client = _load_client(memcache_module)

        if _is_configured_for_pylibmc(memcache_module, memcache_client):
            return object.__new__(PyLibMCNamespaceManager)
        else:
            return object.__new__(MemcachedNamespaceManager)

    def __init__(self, namespace, url,
                 memcache_module='auto',
                 data_dir=None, lock_dir=None,
                 **kw):
        """Create a memcached namespace manager.

        ``url`` is a ';'-separated list of server addresses and is
        required; ``lock_dir`` (or ``data_dir`` + "/container_mcd_lock")
        holds the file locks used for creation synchronization.
        """
        NamespaceManager.__init__(self, namespace)

        # _load_client already ran in __new__, so the module is
        # guaranteed to be present in _client_libs here.
        _memcache_module = _client_libs[memcache_module]

        if not url:
            raise MissingCacheParameter("url is required")

        if lock_dir:
            self.lock_dir = lock_dir
        elif data_dir:
            self.lock_dir = data_dir + "/container_mcd_lock"
        if self.lock_dir:
            verify_directory(self.lock_dir)

        # Check for pylibmc namespace manager, in which case client will be
        # instantiated by subclass __init__, to handle behavior passing to the
        # pylibmc client
        if not _is_configured_for_pylibmc(memcache_module, _memcache_module):
            self.mc = MemcachedNamespaceManager.clients.get(
                (memcache_module, url),
                _memcache_module.Client,
                url.split(';'))

    def get_creation_lock(self, key):
        """Return a file-based lock scoped to (namespace, key)."""
        return file_synchronizer(
            identifier="memcachedcontainer/funclock/%s/%s" %
                (self.namespace, key), lock_dir=self.lock_dir)

    def _format_key(self, key):
        """Build the actual memcached key for *key*.

        Prefixes the namespace, replaces spaces (memcached forbids
        whitespace in keys; '\\302\\267' is the UTF-8 byte sequence for a
        middle dot under Python 2), and falls back to a SHA-1 hex digest
        when the result exceeds memcached's 250-byte key limit.
        """
        if not isinstance(key, str):
            key = key.decode('ascii')
        formated_key = (self.namespace + '_' + key).replace(' ', '\302\267')
        if len(formated_key) > MAX_KEY_LENGTH:
            formated_key = sha1(formated_key).hexdigest()
        return formated_key

    def __getitem__(self, key):
        return self.mc.get(self._format_key(key))

    def __contains__(self, key):
        # memcached has no native "exists" check; note a stored None is
        # indistinguishable from a missing key here.
        value = self.mc.get(self._format_key(key))
        return value is not None

    def has_key(self, key):
        # Legacy alias for ``in`` (Python 2 dict-style API).
        return key in self

    def set_value(self, key, value, expiretime=None):
        """Store *value*; ``expiretime`` (seconds) sets a memcached TTL."""
        if expiretime:
            self.mc.set(self._format_key(key), value, time=expiretime)
        else:
            self.mc.set(self._format_key(key), value)

    def __setitem__(self, key, value):
        self.set_value(key, value)

    def __delitem__(self, key):
        self.mc.delete(self._format_key(key))

    def do_remove(self):
        # Flushes every key on the server(s), not just this namespace.
        self.mc.flush_all()

    def keys(self):
        raise NotImplementedError(
            "Memcache caching does not "
            "support iteration of all cache keys")
+
+
class PyLibMCNamespaceManager(MemcachedNamespaceManager):
    """Provide thread-local support for pylibmc."""

    def __init__(self, *arg, **kw):
        """Create the pylibmc client and a thread-mapped pool around it.

        Accepts the same arguments as MemcachedNamespaceManager plus the
        pylibmc-specific ``protocol``, ``username``, ``password`` and
        behavior options (parsed by ``parse_memcached_behaviors``).
        """
        super(PyLibMCNamespaceManager, self).__init__(*arg, **kw)

        memcache_module = kw.get('memcache_module', 'auto')
        _memcache_module = _client_libs[memcache_module]
        protocol = kw.get('protocol', 'text')
        username = kw.get('username', None)
        password = kw.get('password', None)
        # NOTE(review): assumes ``url`` was passed as a keyword argument;
        # a positional url would leave this None — confirm with callers.
        url = kw.get('url')
        behaviors = parse_memcached_behaviors(kw)

        self.mc = MemcachedNamespaceManager.clients.get(
            (memcache_module, url),
            _memcache_module.Client,
            servers=url.split(';'), behaviors=behaviors,
            binary=(protocol == 'binary'), username=username,
            password=password)
        # Each thread reserves its own client handle from this pool.
        self.pool = pylibmc.ThreadMappedPool(self.mc)

    def __getitem__(self, key):
        with self.pool.reserve() as mc:
            return mc.get(self._format_key(key))

    def __contains__(self, key):
        # A stored None is indistinguishable from a missing key.
        with self.pool.reserve() as mc:
            value = mc.get(self._format_key(key))
            return value is not None

    def has_key(self, key):
        # Legacy alias for ``in`` (Python 2 dict-style API).
        return key in self

    def set_value(self, key, value, expiretime=None):
        """Store *value*; ``expiretime`` (seconds) sets a memcached TTL."""
        with self.pool.reserve() as mc:
            if expiretime:
                mc.set(self._format_key(key), value, time=expiretime)
            else:
                mc.set(self._format_key(key), value)

    def __setitem__(self, key, value):
        self.set_value(key, value)

    def __delitem__(self, key):
        with self.pool.reserve() as mc:
            mc.delete(self._format_key(key))

    def do_remove(self):
        # Flushes every key on the server(s), not just this namespace.
        with self.pool.reserve() as mc:
            mc.flush_all()
+
+
class MemcachedContainer(Container):
    """Container class which invokes :class:`.MemcachedNamespaceManager`."""
    namespace_class = MemcachedNamespaceManager
diff --git a/pyload/lib/beaker/ext/sqla.py b/pyload/lib/beaker/ext/sqla.py
new file mode 100644
index 000000000..6405c2919
--- /dev/null
+++ b/pyload/lib/beaker/ext/sqla.py
@@ -0,0 +1,136 @@
+import cPickle
+import logging
+import pickle
+from datetime import datetime
+
+from beaker.container import OpenResourceNamespaceManager, Container
+from beaker.exceptions import InvalidCacheBackendError, MissingCacheParameter
+from beaker.synchronization import file_synchronizer, null_synchronizer
+from beaker.util import verify_directory, SyncDict
+
+
+log = logging.getLogger(__name__)
+
# SQLAlchemy module, imported lazily by _init_dependencies() so this
# module can be imported even when sqlalchemy is not installed.
sa = None
+
+
class SqlaNamespaceManager(OpenResourceNamespaceManager):
    """Persist cache namespaces via a caller-supplied SQLAlchemy bind/table.

    Unlike DatabaseNamespaceManager, the engine/connection and the Table
    (usually built by ``make_cache_table``) are handed in by the caller.
    Each namespace is one row; its dict is pickled into ``data``.
    """

    # Process-wide registries: one bind per engine URL, one table per
    # "url:table_name" key.
    binds = SyncDict()
    tables = SyncDict()

    @classmethod
    def _init_dependencies(cls):
        """Lazily import SQLAlchemy; raise InvalidCacheBackendError when
        it is not installed."""
        global sa
        if sa is not None:
            return
        try:
            import sqlalchemy as sa
        except ImportError:
            raise InvalidCacheBackendError("SQLAlchemy, which is required by "
                                           "this backend, is not installed")

    def __init__(self, namespace, bind, table, data_dir=None, lock_dir=None,
                 **kwargs):
        """Create a namespace manager for use with a database table via
        SQLAlchemy.

        ``bind``
            SQLAlchemy ``Engine`` or ``Connection`` object

        ``table``
            SQLAlchemy ``Table`` object in which to store namespace data.
            This should usually be something created by ``make_cache_table``.

        ``lock_dir`` (or ``data_dir`` + "/container_db_lock") holds the
        file locks used for creation synchronization.
        """
        OpenResourceNamespaceManager.__init__(self, namespace)

        if lock_dir:
            self.lock_dir = lock_dir
        elif data_dir:
            self.lock_dir = data_dir + "/container_db_lock"
        if self.lock_dir:
            verify_directory(self.lock_dir)

        self.bind = self.__class__.binds.get(str(bind.url), lambda: bind)
        self.table = self.__class__.tables.get('%s:%s' % (bind.url, table.name),
                                               lambda: table)
        self.hash = {}
        self._is_new = False
        self.loaded = False

    def get_access_lock(self):
        # Concurrent row access is left to the database itself.
        return null_synchronizer()

    def get_creation_lock(self, key):
        """Return a file lock guarding value creation for this namespace."""
        return file_synchronizer(
            identifier="databasecontainer/funclock/%s" % self.namespace,
            lock_dir=self.lock_dir)

    def do_open(self, flags, replace):
        """Load this namespace's row into ``self.hash`` (only once)."""
        if self.loaded:
            self.flags = flags
            return
        select = sa.select([self.table.c.data],
                           (self.table.c.namespace == self.namespace))
        result = self.bind.execute(select).fetchone()
        if not result:
            self._is_new = True
            self.hash = {}
        else:
            self._is_new = False
            try:
                # PickleType has already unpickled the column value.
                self.hash = result['data']
            except (IOError, OSError, EOFError, cPickle.PickleError,
                    pickle.PickleError):
                log.debug("Couldn't load pickle data, creating new storage")
                self.hash = {}
                self._is_new = True
        self.flags = flags
        self.loaded = True

    def do_close(self):
        """Write ``self.hash`` back to the table if opened writable ('c'/'w')."""
        if self.flags is not None and (self.flags == 'c' or self.flags == 'w'):
            if self._is_new:
                insert = self.table.insert()
                self.bind.execute(insert, namespace=self.namespace, data=self.hash,
                                  accessed=datetime.now(), created=datetime.now())
                self._is_new = False
            else:
                update = self.table.update(self.table.c.namespace == self.namespace)
                self.bind.execute(update, data=self.hash, accessed=datetime.now())
        self.flags = None

    def do_remove(self):
        """Delete this namespace's row and reset in-memory state."""
        delete = self.table.delete(self.table.c.namespace == self.namespace)
        self.bind.execute(delete)
        self.hash = {}
        self._is_new = True

    def __getitem__(self, key):
        return self.hash[key]

    def __contains__(self, key):
        return key in self.hash

    def __setitem__(self, key, value):
        self.hash[key] = value

    def __delitem__(self, key):
        del self.hash[key]

    def keys(self):
        return self.hash.keys()
+
+
class SqlaContainer(Container):
    """Container using SqlaNamespaceManager for storage.

    NOTE(review): sibling backends assign ``namespace_class`` instead of
    ``namespace_manager`` — confirm which attribute Container reads.
    """
    namespace_manager = SqlaNamespaceManager
+
+
def make_cache_table(metadata, table_name='beaker_cache', schema_name=None):
    """Return a ``Table`` object suitable for storing cached values for the
    namespace manager. Do not create the table."""
    # Fall back to the metadata's own schema when none is given explicitly.
    schema = schema_name if schema_name else metadata.schema
    columns = [
        sa.Column('namespace', sa.String(255), primary_key=True),
        sa.Column('accessed', sa.DateTime, nullable=False),
        sa.Column('created', sa.DateTime, nullable=False),
        sa.Column('data', sa.PickleType, nullable=False),
    ]
    return sa.Table(table_name, metadata, *columns, schema=schema)