diff options
Diffstat (limited to 'lib/Python/Lib/beaker/ext')
-rw-r--r-- | lib/Python/Lib/beaker/ext/__init__.py | 0 | ||||
-rw-r--r-- | lib/Python/Lib/beaker/ext/database.py | 165 | ||||
-rw-r--r-- | lib/Python/Lib/beaker/ext/google.py | 120 | ||||
-rw-r--r-- | lib/Python/Lib/beaker/ext/memcached.py | 82 | ||||
-rw-r--r-- | lib/Python/Lib/beaker/ext/sqla.py | 133 |
5 files changed, 500 insertions, 0 deletions
import cPickle
import logging
import pickle
from datetime import datetime

from beaker.container import OpenResourceNamespaceManager, Container
from beaker.exceptions import InvalidCacheBackendError, MissingCacheParameter
from beaker.synchronization import file_synchronizer, null_synchronizer
from beaker.util import verify_directory, SyncDict

log = logging.getLogger(__name__)

# SQLAlchemy is imported lazily by _init_dependencies() so this module can
# be imported even when the library is not installed.
sa = None
pool = None
types = None


class DatabaseNamespaceManager(OpenResourceNamespaceManager):
    """Cache namespace manager that persists each namespace's key/value
    hash as one pickled row of a SQLAlchemy-managed database table."""

    # Process-wide, thread-safe caches keyed by url+table_name so engines
    # and Table objects are created once and shared between managers.
    metadatas = SyncDict()
    tables = SyncDict()

    @classmethod
    def _init_dependencies(cls):
        """Import SQLAlchemy on first use.

        Raises InvalidCacheBackendError when sqlalchemy is unavailable.
        """
        global sa, pool, types
        if sa is not None:
            return
        try:
            import sqlalchemy as sa
            import sqlalchemy.pool as pool
            from sqlalchemy import types
        except ImportError:
            raise InvalidCacheBackendError("Database cache backend requires "
                                           "the 'sqlalchemy' library")

    def __init__(self, namespace, url=None, sa_opts=None, optimistic=False,
                 table_name='beaker_cache', data_dir=None, lock_dir=None,
                 **params):
        """Creates a database namespace manager

        ``url``
            SQLAlchemy compliant db url
        ``sa_opts``
            A dictionary of SQLAlchemy keyword options to initialize the
            engine with.
        ``optimistic``
            Use optimistic session locking, note that this will result in an
            additional select when updating a cache value to compare version
            numbers.
        ``table_name``
            The table name to use in the database for the cache.
        """
        OpenResourceNamespaceManager.__init__(self, namespace)

        if sa_opts is None:
            sa_opts = params

        if lock_dir:
            self.lock_dir = lock_dir
        elif data_dir:
            self.lock_dir = data_dir + "/container_db_lock"
        # NOTE(review): when neither lock_dir nor data_dir is supplied,
        # self.lock_dir is never assigned here -- confirm the base class
        # provides a default before relying on get_creation_lock().
        if self.lock_dir:
            verify_directory(self.lock_dir)

        # Check to see if the table's been created before
        url = url or sa_opts['sa.url']
        table_key = url + table_name

        def make_cache():
            # Check to see if we have a connection pool open already
            meta_key = url + table_name

            def make_meta():
                # SQLAlchemy pops the url, this ensures it sticks around
                # later
                sa_opts['sa.url'] = url
                engine = sa.engine_from_config(sa_opts, 'sa.')
                meta = sa.MetaData()
                meta.bind = engine
                return meta
            meta = DatabaseNamespaceManager.metadatas.get(meta_key, make_meta)
            # Create the table object and cache it now
            cache = sa.Table(table_name, meta,
                             sa.Column('id', types.Integer, primary_key=True),
                             sa.Column('namespace', types.String(255),
                                       nullable=False),
                             sa.Column('accessed', types.DateTime,
                                       nullable=False),
                             sa.Column('created', types.DateTime,
                                       nullable=False),
                             sa.Column('data', types.PickleType,
                                       nullable=False),
                             sa.UniqueConstraint('namespace'))
            cache.create(checkfirst=True)
            return cache
        self.hash = {}
        self._is_new = False
        self.loaded = False
        self.cache = DatabaseNamespaceManager.tables.get(table_key, make_cache)

    def get_access_lock(self):
        # Row-level database access needs no cross-process read lock.
        return null_synchronizer()

    def get_creation_lock(self, key):
        # File-based lock serializes value creation across processes.
        return file_synchronizer(
            identifier="databasecontainer/funclock/%s" % self.namespace,
            lock_dir=self.lock_dir)

    def do_open(self, flags):
        """Load this namespace's pickled hash from the table (once)."""
        # If we already loaded the data, don't bother loading it again
        if self.loaded:
            self.flags = flags
            return

        cache = self.cache
        result = sa.select([cache.c.data],
                           cache.c.namespace == self.namespace
                           ).execute().fetchone()
        if not result:
            self._is_new = True
            self.hash = {}
        else:
            self._is_new = False
            try:
                self.hash = result['data']
            except (IOError, OSError, EOFError, cPickle.PickleError,
                    pickle.PickleError):
                # fix: message previously read "Couln't"
                log.debug("Couldn't load pickle data, creating new storage")
                self.hash = {}
                self._is_new = True
        self.flags = flags
        self.loaded = True

    def do_close(self):
        """Write the hash back (insert or update) when opened writable."""
        if self.flags is not None and (self.flags == 'c' or self.flags == 'w'):
            cache = self.cache
            if self._is_new:
                cache.insert().execute(namespace=self.namespace,
                                       data=self.hash,
                                       accessed=datetime.now(),
                                       created=datetime.now())
                self._is_new = False
            else:
                cache.update(cache.c.namespace == self.namespace).execute(
                    data=self.hash, accessed=datetime.now())
        self.flags = None

    def do_remove(self):
        """Delete this namespace's row and reset the in-memory hash."""
        cache = self.cache
        cache.delete(cache.c.namespace == self.namespace).execute()
        self.hash = {}

        # We can retain the fact that we did a load attempt, but since the
        # file is gone this will be a new namespace should it be saved.
        self._is_new = True

    def __getitem__(self, key):
        return self.hash[key]

    def __contains__(self, key):
        # 'in' instead of the deprecated dict.has_key()
        return key in self.hash

    def __setitem__(self, key, value):
        self.hash[key] = value

    def __delitem__(self, key):
        del self.hash[key]

    def keys(self):
        return self.hash.keys()


class DatabaseContainer(Container):
    namespace_manager = DatabaseNamespaceManager
import cPickle
import logging
from datetime import datetime

from beaker.container import OpenResourceNamespaceManager, Container
from beaker.exceptions import InvalidCacheBackendError
from beaker.synchronization import null_synchronizer

log = logging.getLogger(__name__)

# google.appengine.ext.db is imported lazily by _init_dependencies().
db = None


class GoogleNamespaceManager(OpenResourceNamespaceManager):
    """Cache namespace manager storing each namespace's hash as a pickled
    blob on a Google App Engine datastore entity."""

    # One dynamically-created db.Model subclass per table name, shared by
    # every manager in the process.
    tables = {}

    @classmethod
    def _init_dependencies(cls):
        """Import the App Engine datastore API on first use.

        Raises InvalidCacheBackendError when it is unavailable.
        """
        global db
        if db is not None:
            return
        try:
            db = __import__('google.appengine.ext.db').appengine.ext.db
        except ImportError:
            raise InvalidCacheBackendError("Datastore cache backend requires the "
                                           "'google.appengine.ext' library")

    def __init__(self, namespace, table_name='beaker_cache', **params):
        """Creates a datastore namespace manager"""
        OpenResourceNamespaceManager.__init__(self, namespace)

        def make_cache():
            # Build a Model subclass on the fly for the requested table.
            table_dict = dict(created=db.DateTimeProperty(),
                              accessed=db.DateTimeProperty(),
                              data=db.BlobProperty())
            table = type(table_name, (db.Model,), table_dict)
            return table
        self.table_name = table_name
        self.cache = GoogleNamespaceManager.tables.setdefault(table_name,
                                                              make_cache())
        self.hash = {}
        self._is_new = False
        self.loaded = False
        # Pre-compute whether debug logging is enabled so the hot paths can
        # skip message formatting.
        self.log_debug = logging.DEBUG >= log.getEffectiveLevel()

        # Google wants namespaces to start with letters, change the namespace
        # to start with a letter
        self.namespace = 'p%s' % self.namespace

    def get_access_lock(self):
        return null_synchronizer()

    def get_creation_lock(self, key):
        # this is weird, should probably be present
        return null_synchronizer()

    def do_open(self, flags):
        """Load this namespace's pickled hash from the datastore (once)."""
        # If we already loaded the data, don't bother loading it again
        if self.loaded:
            self.flags = flags
            return

        item = self.cache.get_by_key_name(self.namespace)

        if not item:
            self._is_new = True
            self.hash = {}
        else:
            self._is_new = False
            try:
                self.hash = cPickle.loads(str(item.data))
            except (IOError, OSError, EOFError, cPickle.PickleError):
                if self.log_debug:
                    # fix: message previously read "Couln't"
                    log.debug("Couldn't load pickle data, creating new storage")
                self.hash = {}
                self._is_new = True
        self.flags = flags
        self.loaded = True

    def do_close(self):
        """Persist the hash (insert or update) when opened writable."""
        if self.flags is not None and (self.flags == 'c' or self.flags == 'w'):
            if self._is_new:
                item = self.cache(key_name=self.namespace)
                item.data = cPickle.dumps(self.hash)
                item.created = datetime.now()
                item.accessed = datetime.now()
                item.put()
                self._is_new = False
            else:
                item = self.cache.get_by_key_name(self.namespace)
                item.data = cPickle.dumps(self.hash)
                item.accessed = datetime.now()
                item.put()
        self.flags = None

    def do_remove(self):
        """Delete this namespace's entity and reset the in-memory hash."""
        item = self.cache.get_by_key_name(self.namespace)
        item.delete()
        self.hash = {}

        # We can retain the fact that we did a load attempt, but since the
        # file is gone this will be a new namespace should it be saved.
        self._is_new = True

    def __getitem__(self, key):
        return self.hash[key]

    def __contains__(self, key):
        # 'in' instead of the deprecated dict.has_key()
        return key in self.hash

    def __setitem__(self, key, value):
        self.hash[key] = value

    def __delitem__(self, key):
        del self.hash[key]

    def keys(self):
        return self.hash.keys()


class GoogleContainer(Container):
    namespace_class = GoogleNamespaceManager
from beaker.container import NamespaceManager, Container
from beaker.exceptions import InvalidCacheBackendError, MissingCacheParameter
from beaker.synchronization import file_synchronizer, null_synchronizer
from beaker.util import verify_directory, SyncDict
import warnings

# Client library is resolved lazily by _init_dependencies().
memcache = None


class MemcachedNamespaceManager(NamespaceManager):
    """Namespace manager that delegates key/value storage to a memcached
    server pool."""

    # Shared, thread-safe pool of memcache clients keyed by server url.
    clients = SyncDict()

    @classmethod
    def _init_dependencies(cls):
        """Bind the first available client library, preferring pylibmc,
        then cmemcache (with a concurrency warning), then memcache."""
        global memcache
        if memcache is not None:
            return
        try:
            import pylibmc as memcache
        except ImportError:
            try:
                import cmemcache as memcache
                warnings.warn("cmemcache is known to have serious "
                              "concurrency issues; consider using 'memcache' or 'pylibmc'")
            except ImportError:
                try:
                    import memcache
                except ImportError:
                    raise InvalidCacheBackendError("Memcached cache backend requires either "
                                                   "the 'memcache' or 'cmemcache' library")

    def __init__(self, namespace, url=None, data_dir=None, lock_dir=None,
                 **params):
        """Create a manager for ``namespace`` using the ';'-separated
        server list in ``url``."""
        NamespaceManager.__init__(self, namespace)

        if not url:
            raise MissingCacheParameter("url is required")

        if lock_dir:
            self.lock_dir = lock_dir
        elif data_dir:
            self.lock_dir = data_dir + "/container_mcd_lock"
        if self.lock_dir:
            verify_directory(self.lock_dir)

        # One client per distinct url, shared process-wide via the pool.
        self.mc = MemcachedNamespaceManager.clients.get(
            url, memcache.Client, url.split(';'))

    def get_creation_lock(self, key):
        return file_synchronizer(
            identifier="memcachedcontainer/funclock/%s" % self.namespace,
            lock_dir=self.lock_dir)

    def _format_key(self, key):
        # Prefix with the namespace; memcached keys may not contain
        # spaces, so they are replaced with a UTF-8 middle dot.
        return self.namespace + '_' + key.replace(' ', '\302\267')

    def __getitem__(self, key):
        return self.mc.get(self._format_key(key))

    def __contains__(self, key):
        return self.mc.get(self._format_key(key)) is not None

    def has_key(self, key):
        return key in self

    def set_value(self, key, value, expiretime=None):
        """Store ``value``, with an optional expiry time in seconds."""
        full_key = self._format_key(key)
        if expiretime:
            self.mc.set(full_key, value, time=expiretime)
        else:
            self.mc.set(full_key, value)

    def __setitem__(self, key, value):
        self.set_value(key, value)

    def __delitem__(self, key):
        self.mc.delete(self._format_key(key))

    def do_remove(self):
        # Memcached cannot enumerate a single namespace's keys, so
        # removal flushes the whole server pool.
        self.mc.flush_all()

    def keys(self):
        raise NotImplementedError("Memcache caching does not support iteration of all cache keys")


class MemcachedContainer(Container):
    namespace_class = MemcachedNamespaceManager
class SqlaNamespaceManager(OpenResourceNamespaceManager):
    """Namespace manager that stores each namespace's pickled hash in a
    caller-supplied SQLAlchemy table (see ``make_cache_table``)."""

    # Shared, thread-safe pools of binds and tables keyed by engine url
    # (and table name) so they are reused across managers.
    binds = SyncDict()
    tables = SyncDict()

    @classmethod
    def _init_dependencies(cls):
        """Import SQLAlchemy on first use.

        Raises InvalidCacheBackendError when it is unavailable.
        """
        global sa
        if sa is not None:
            return
        try:
            import sqlalchemy as sa
        except ImportError:
            raise InvalidCacheBackendError("SQLAlchemy, which is required by "
                                           "this backend, is not installed")

    def __init__(self, namespace, bind, table, data_dir=None, lock_dir=None,
                 **kwargs):
        """Create a namespace manager for use with a database table via
        SQLAlchemy.

        ``bind``
            SQLAlchemy ``Engine`` or ``Connection`` object

        ``table``
            SQLAlchemy ``Table`` object in which to store namespace data.
            This should usually be something created by ``make_cache_table``.
        """
        OpenResourceNamespaceManager.__init__(self, namespace)

        if lock_dir:
            self.lock_dir = lock_dir
        elif data_dir:
            self.lock_dir = data_dir + "/container_db_lock"
        # NOTE(review): self.lock_dir stays unset when neither lock_dir nor
        # data_dir is given -- confirm the base class supplies a default.
        if self.lock_dir:
            verify_directory(self.lock_dir)

        self.bind = self.__class__.binds.get(str(bind.url), lambda: bind)
        self.table = self.__class__.tables.get('%s:%s' % (bind.url, table.name),
                                               lambda: table)
        self.hash = {}
        self._is_new = False
        self.loaded = False

    def get_access_lock(self):
        # Row-level database access needs no cross-process read lock.
        return null_synchronizer()

    def get_creation_lock(self, key):
        # File-based lock serializes value creation across processes.
        return file_synchronizer(
            identifier="databasecontainer/funclock/%s" % self.namespace,
            lock_dir=self.lock_dir)

    def do_open(self, flags):
        """Load this namespace's pickled hash from the table (once)."""
        if self.loaded:
            self.flags = flags
            return
        select = sa.select([self.table.c.data],
                           (self.table.c.namespace == self.namespace))
        result = self.bind.execute(select).fetchone()
        if not result:
            self._is_new = True
            self.hash = {}
        else:
            self._is_new = False
            try:
                self.hash = result['data']
            except (IOError, OSError, EOFError, cPickle.PickleError,
                    pickle.PickleError):
                # fix: message previously read "Couln't"
                log.debug("Couldn't load pickle data, creating new storage")
                self.hash = {}
                self._is_new = True
        self.flags = flags
        self.loaded = True

    def do_close(self):
        """Write the hash back (insert or update) when opened writable."""
        if self.flags is not None and (self.flags == 'c' or self.flags == 'w'):
            if self._is_new:
                insert = self.table.insert()
                self.bind.execute(insert, namespace=self.namespace,
                                  data=self.hash, accessed=datetime.now(),
                                  created=datetime.now())
                self._is_new = False
            else:
                update = self.table.update(
                    self.table.c.namespace == self.namespace)
                self.bind.execute(update, data=self.hash,
                                  accessed=datetime.now())
        self.flags = None

    def do_remove(self):
        """Delete this namespace's row and reset the in-memory hash."""
        delete = self.table.delete(self.table.c.namespace == self.namespace)
        self.bind.execute(delete)
        self.hash = {}
        self._is_new = True

    def __getitem__(self, key):
        return self.hash[key]

    def __contains__(self, key):
        # 'in' instead of the deprecated dict.has_key()
        return key in self.hash

    def __setitem__(self, key, value):
        self.hash[key] = value

    def __delitem__(self, key):
        del self.hash[key]

    def keys(self):
        return self.hash.keys()


class SqlaContainer(Container):
    namespace_manager = SqlaNamespaceManager


def make_cache_table(metadata, table_name='beaker_cache'):
    """Return a ``Table`` object suitable for storing cached values for the
    namespace manager. Do not create the table."""
    return sa.Table(table_name, metadata,
                    sa.Column('namespace', sa.String(255), primary_key=True),
                    sa.Column('accessed', sa.DateTime, nullable=False),
                    sa.Column('created', sa.DateTime, nullable=False),
                    sa.Column('data', sa.PickleType, nullable=False))