aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--Changes4
-rw-r--r--cachetools.py411
-rw-r--r--cachetools/__init__.py12
-rw-r--r--cachetools/cache.py71
-rw-r--r--cachetools/decorators.py115
-rw-r--r--cachetools/lfucache.py35
-rw-r--r--cachetools/link.py10
-rw-r--r--cachetools/lrucache.py72
-rw-r--r--cachetools/rrcache.py20
-rw-r--r--cachetools/ttlcache.py106
-rw-r--r--docs/index.rst23
-rw-r--r--setup.py4
-rw-r--r--tests/__init__.py50
-rw-r--r--tests/test_lrucache.py51
-rw-r--r--tests/test_ttlcache.py73
15 files changed, 573 insertions, 484 deletions
diff --git a/Changes b/Changes
index 3c0fd69..cd7d8dc 100644
--- a/Changes
+++ b/Changes
@@ -1,7 +1,9 @@
0.5.0 UNRELEASED
----------------
-- Update Changelog, README.
+- Update Changelog, README, documentation.
+
+- Do not delete expired items in TTLCache.__getitem__().
0.4.0 2014-06-16
diff --git a/cachetools.py b/cachetools.py
deleted file mode 100644
index 0f6a2ea..0000000
--- a/cachetools.py
+++ /dev/null
@@ -1,411 +0,0 @@
-"""Extensible memoizing collections and decorators"""
-
-import collections
-import functools
-import operator
-import random
-import time
-
-try:
- from threading import RLock
-except ImportError:
- from dummy_threading import RLock
-
-__version__ = '0.5.0'
-
-_marker = object()
-
-
-class _Link(object):
- __slots__ = 'prev', 'next', 'data'
-
-
-class Cache(collections.MutableMapping):
- """Mutable mapping to serve as a simple cache or cache base class.
-
- This class discards arbitrary items using :meth:`popitem` to make
- space when necessary. Derived classes may override
- :meth:`popitem` to implement specific caching strategies. If a
- subclass has to keep track of item access, insertion or deletion,
- it may need override :meth:`__getitem__`, :meth:`__setitem__` and
- :meth:`__delitem__`, too.
-
- """
-
- def __init__(self, maxsize, getsizeof=None):
- if getsizeof is not None:
- self.getsizeof = getsizeof
- self.__mapping = dict()
- self.__maxsize = maxsize
- self.__currsize = 0
-
- def __getitem__(self, key):
- return self.__mapping[key][0]
-
- def __setitem__(self, key, value):
- mapping = self.__mapping
- maxsize = self.__maxsize
- size = self.getsizeof(value)
- if size > maxsize:
- raise ValueError('value too large')
- if key not in mapping or mapping[key][1] < size:
- while self.__currsize + size > maxsize:
- self.popitem()
- if key in mapping:
- self.__currsize -= mapping[key][1]
- mapping[key] = (value, size)
- self.__currsize += size
-
- def __delitem__(self, key):
- _, size = self.__mapping.pop(key)
- self.__currsize -= size
-
- def __iter__(self):
- return iter(self.__mapping)
-
- def __len__(self):
- return len(self.__mapping)
-
- def __repr__(self):
- return '%s(%r, maxsize=%d, currsize=%d)' % (
- self.__class__.__name__,
- list(self.items()),
- self.__maxsize,
- self.__currsize,
- )
-
- @property
- def maxsize(self):
- """Return the maximum size of the cache."""
- return self.__maxsize
-
- @property
- def currsize(self):
- """Return the current size of the cache."""
- return self.__currsize
-
- @staticmethod
- def getsizeof(value):
- """Return the size of a cache element."""
- return 1
-
-
-class RRCache(Cache):
- """Random Replacement (RR) cache implementation.
-
- This class randomly selects candidate items and discards them to
- make space when necessary.
-
- """
-
- def popitem(self):
- """Remove and return a random `(key, value)` pair."""
- try:
- key = random.choice(list(self))
- except IndexError:
- raise KeyError('cache is empty')
- return (key, self.pop(key))
-
-
-class LFUCache(Cache):
- """Least Frequently Used (LFU) cache implementation.
-
- This class counts how often an item is retrieved, and discards the
- items used least often to make space when necessary.
-
- """
-
- def __init__(self, maxsize, getsizeof=None):
- if getsizeof is not None:
- Cache.__init__(self, maxsize, lambda e: getsizeof(e[0]))
- else:
- Cache.__init__(self, maxsize)
-
- def __getitem__(self, key, cache_getitem=Cache.__getitem__):
- entry = cache_getitem(self, key)
- entry[1] += 1
- return entry[0]
-
- def __setitem__(self, key, value, cache_setitem=Cache.__setitem__):
- cache_setitem(self, key, [value, 0])
-
- def popitem(self):
- """Remove and return the `(key, value)` pair least frequently used."""
- items = ((key, Cache.__getitem__(self, key)[1]) for key in self)
- try:
- key, _ = min(items, key=operator.itemgetter(1))
- except ValueError:
- raise KeyError('cache is empty')
- return (key, self.pop(key))
-
-
-class LRUCache(Cache):
- """Least Recently Used (LRU) cache implementation.
-
- This class discards the least recently used items first to make
- space when necessary.
-
- """
-
- def __init__(self, maxsize, getsizeof=None):
- if getsizeof is not None:
- Cache.__init__(self, maxsize, lambda e: getsizeof(e[0]))
- else:
- Cache.__init__(self, maxsize)
- root = _Link()
- root.prev = root.next = root
- self.__root = root
-
- def __getitem__(self, key, cache_getitem=Cache.__getitem__):
- value, link = cache_getitem(self, key)
- root = self.__root
- link.prev.next = link.next
- link.next.prev = link.prev
- link.prev = tail = root.prev
- link.next = root
- tail.next = root.prev = link
- return value
-
- def __setitem__(self, key, value,
- cache_getitem=Cache.__getitem__,
- cache_setitem=Cache.__setitem__):
- try:
- _, link = cache_getitem(self, key)
- except KeyError:
- link = _Link()
- cache_setitem(self, key, (value, link))
- try:
- link.prev.next = link.next
- link.next.prev = link.prev
- except AttributeError:
- link.data = key
- root = self.__root
- link.prev = tail = root.prev
- link.next = root
- tail.next = root.prev = link
-
- def __delitem__(self, key,
- cache_getitem=Cache.__getitem__,
- cache_delitem=Cache.__delitem__):
- _, link = cache_getitem(self, key)
- cache_delitem(self, key)
- link.prev.next = link.next
- link.next.prev = link.prev
- del link.next
- del link.prev
-
- def __repr__(self, cache_getitem=Cache.__getitem__):
- return '%s(%r, maxsize=%d, currsize=%d)' % (
- self.__class__.__name__,
- [(key, cache_getitem(self, key)[0]) for key in self],
- self.maxsize,
- self.currsize,
- )
-
- def popitem(self):
- """Remove and return the `(key, value)` pair least recently used."""
- root = self.__root
- link = root.next
- if link is root:
- raise KeyError('cache is empty')
- key = link.data
- return (key, self.pop(key))
-
-
-class TTLCache(LRUCache):
- """Cache implementation with per-item time-to-live (TTL) value.
-
- This class associates a time-to-live value with each item. Items
- that expire because they have exceeded their time-to-live will be
- removed automatically. If no expired items are there to remove,
- the least recently used items will be discarded first to make
- space when necessary.
-
- """
-
- def __init__(self, maxsize, ttl, getsizeof=None, timer=time.time):
- if getsizeof is not None:
- LRUCache.__init__(self, maxsize, lambda e: getsizeof(e[0]))
- else:
- LRUCache.__init__(self, maxsize)
- root = _Link()
- root.prev = root.next = root
- self.__root = root
- self.__timer = timer
- self.__ttl = ttl
-
- def __getitem__(self, key,
- cache_getitem=LRUCache.__getitem__,
- cache_delitem=LRUCache.__delitem__):
- value, link = cache_getitem(self, key)
- if self.__timer() < link.data[1]:
- return value
- root = self.__root
- head = root.next
- link = link.next
- while head is not link:
- cache_delitem(self, head.data[0])
- head.next.prev = root
- head = root.next = head.next
- raise KeyError('%r has expired' % key)
-
- def __setitem__(self, key, value,
- cache_getitem=LRUCache.__getitem__,
- cache_setitem=LRUCache.__setitem__,
- cache_delitem=LRUCache.__delitem__):
- root = self.__root
- head = root.next
- time = self.__timer()
- while head is not root and head.data[1] < time:
- cache_delitem(self, head.data[0])
- head.next.prev = root
- head = root.next = head.next
- try:
- _, link = cache_getitem(self, key)
- except KeyError:
- link = _Link()
- cache_setitem(self, key, (value, link))
- try:
- link.prev.next = link.next
- link.next.prev = link.prev
- except AttributeError:
- pass
- link.data = (key, time + self.__ttl)
- link.prev = tail = root.prev
- link.next = root
- tail.next = root.prev = link
-
- def __delitem__(self, key,
- cache_getitem=LRUCache.__getitem__,
- cache_delitem=LRUCache.__delitem__):
- _, link = cache_getitem(self, key)
- cache_delitem(self, key)
- link.prev.next = link.next
- link.next.prev = link.prev
-
- def __repr__(self, cache_getitem=LRUCache.__getitem__):
- return '%s(%r, maxsize=%d, currsize=%d)' % (
- self.__class__.__name__,
- [(key, cache_getitem(self, key)[0]) for key in self],
- self.maxsize,
- self.currsize,
- )
-
- def pop(self, key, default=_marker):
- try:
- value, link = LRUCache.__getitem__(self, key)
- except KeyError:
- if default is not _marker:
- return default
- raise
- LRUCache.__delitem__(self, key)
- link.prev.next = link.next
- link.next.prev = link.prev
- del link.next
- del link.prev
- return value
-
-
-CacheInfo = collections.namedtuple('CacheInfo', 'hits misses maxsize currsize')
-
-
-def _makekey(args, kwargs):
- return (args, tuple(sorted(kwargs.items())))
-
-
-def _makekey_typed(args, kwargs):
- key = _makekey(args, kwargs)
- key += tuple(type(v) for v in args)
- key += tuple(type(v) for k, v in sorted(kwargs.items()))
- return key
-
-
-def _cachedfunc(cache, makekey, lock):
- def decorator(func):
- stats = [0, 0]
-
- def wrapper(*args, **kwargs):
- key = makekey(args, kwargs)
- with lock:
- try:
- result = cache[key]
- stats[0] += 1
- return result
- except KeyError:
- stats[1] += 1
- result = func(*args, **kwargs)
- with lock:
- cache[key] = result
- return result
-
- def cache_info():
- with lock:
- hits, misses = stats
- maxsize = cache.maxsize
- currsize = cache.currsize
- return CacheInfo(hits, misses, maxsize, currsize)
-
- def cache_clear():
- with lock:
- cache.clear()
-
- wrapper.cache_info = cache_info
- wrapper.cache_clear = cache_clear
- return functools.update_wrapper(wrapper, func)
-
- return decorator
-
-
-def lru_cache(maxsize=128, typed=False, getsizeof=None, lock=RLock):
- """Decorator to wrap a function with a memoizing callable that saves
- up to `maxsize` results based on a Least Recently Used (LRU)
- algorithm.
-
- """
- makekey = _makekey_typed if typed else _makekey
- return _cachedfunc(LRUCache(maxsize, getsizeof), makekey, lock())
-
-
-def lfu_cache(maxsize=128, typed=False, getsizeof=None, lock=RLock):
- """Decorator to wrap a function with a memoizing callable that saves
- up to `maxsize` results based on a Least Frequently Used (LFU)
- algorithm.
-
- """
- makekey = _makekey_typed if typed else _makekey
- return _cachedfunc(LFUCache(maxsize, getsizeof), makekey, lock())
-
-
-def rr_cache(maxsize=128, typed=False, getsizeof=None, lock=RLock):
- """Decorator to wrap a function with a memoizing callable that saves
- up to `maxsize` results based on a Random Replacement (RR)
- algorithm.
-
- """
- makekey = _makekey_typed if typed else _makekey
- return _cachedfunc(RRCache(maxsize, getsizeof), makekey, lock())
-
-
-def cachedmethod(cache, typed=False):
- """Decorator to wrap a class or instance method with a memoizing
- callable that saves results in a (possibly shared) cache.
-
- """
- makekey = _makekey_typed if typed else _makekey
-
- def decorator(method):
- def wrapper(self, *args, **kwargs):
- # TODO: `shared`, locking...
- key = makekey((method,) + args, kwargs)
- mapping = cache(self)
- try:
- return mapping[key]
- except KeyError:
- pass
- result = method(self, *args, **kwargs)
- mapping[key] = result
- return result
-
- return functools.update_wrapper(wrapper, method)
-
- return decorator
diff --git a/cachetools/__init__.py b/cachetools/__init__.py
new file mode 100644
index 0000000..c999d9d
--- /dev/null
+++ b/cachetools/__init__.py
@@ -0,0 +1,12 @@
+"""Extensible memoizing collections and decorators"""
+
+# flake8: noqa
+
+from .cache import Cache
+from .rrcache import RRCache
+from .lfucache import LFUCache
+from .lrucache import LRUCache
+from .ttlcache import TTLCache
+from .decorators import rr_cache, lfu_cache, lru_cache, cachedmethod
+
+__version__ = '0.5.0'
diff --git a/cachetools/cache.py b/cachetools/cache.py
new file mode 100644
index 0000000..d5176eb
--- /dev/null
+++ b/cachetools/cache.py
@@ -0,0 +1,71 @@
+import collections
+
+
+class Cache(collections.MutableMapping):
+ """Mutable mapping to serve as a simple cache or cache base class.
+
+ This class discards arbitrary items using :meth:`popitem` to make
+ space when necessary. Derived classes may override
+ :meth:`popitem` to implement specific caching strategies. If a
+ subclass has to keep track of item access, insertion or deletion,
+ it may need to override :meth:`__getitem__`, :meth:`__setitem__` and
+ :meth:`__delitem__`, too.
+
+ """
+
+ def __init__(self, maxsize, getsizeof=None):
+ if getsizeof is not None:
+ self.getsizeof = getsizeof
+ self.__mapping = dict()
+ self.__maxsize = maxsize
+ self.__currsize = 0
+
+ def __repr__(self):
+ return '%s(%r, maxsize=%d, currsize=%d)' % (
+ self.__class__.__name__,
+ list(self.items()),
+ self.__maxsize,
+ self.__currsize,
+ )
+
+ def __getitem__(self, key):
+ return self.__mapping[key][0]
+
+ def __setitem__(self, key, value):
+ mapping = self.__mapping
+ maxsize = self.__maxsize
+ size = self.getsizeof(value)
+ if size > maxsize:
+ raise ValueError('value too large')
+ if key not in mapping or mapping[key][1] < size:
+ while self.__currsize + size > maxsize:
+ self.popitem()
+ if key in mapping:
+ self.__currsize -= mapping[key][1]
+ mapping[key] = (value, size)
+ self.__currsize += size
+
+ def __delitem__(self, key):
+ _, size = self.__mapping.pop(key)
+ self.__currsize -= size
+
+ def __iter__(self):
+ return iter(self.__mapping)
+
+ def __len__(self):
+ return len(self.__mapping)
+
+ @property
+ def maxsize(self):
+ """Return the maximum size of the cache."""
+ return self.__maxsize
+
+ @property
+ def currsize(self):
+ """Return the current size of the cache."""
+ return self.__currsize
+
+ @staticmethod
+ def getsizeof(value):
+ """Return the size of a cache element."""
+ return 1
diff --git a/cachetools/decorators.py b/cachetools/decorators.py
new file mode 100644
index 0000000..9104013
--- /dev/null
+++ b/cachetools/decorators.py
@@ -0,0 +1,115 @@
+from .rrcache import RRCache
+from .lfucache import LFUCache
+from .lrucache import LRUCache
+
+import collections
+import functools
+
+try:
+ from threading import RLock
+except ImportError:
+ from dummy_threading import RLock
+
+CacheInfo = collections.namedtuple('CacheInfo', 'hits misses maxsize currsize')
+
+
+def _makekey(args, kwargs):
+ return (args, tuple(sorted(kwargs.items())))
+
+
+def _makekey_typed(args, kwargs):
+ key = _makekey(args, kwargs)
+ key += tuple(type(v) for v in args)
+ key += tuple(type(v) for k, v in sorted(kwargs.items()))
+ return key
+
+
+def _cachedfunc(cache, makekey, lock):
+ def decorator(func):
+ stats = [0, 0]
+
+ def wrapper(*args, **kwargs):
+ key = makekey(args, kwargs)
+ with lock:
+ try:
+ result = cache[key]
+ stats[0] += 1
+ return result
+ except KeyError:
+ stats[1] += 1
+ result = func(*args, **kwargs)
+ with lock:
+ cache[key] = result
+ return result
+
+ def cache_info():
+ with lock:
+ hits, misses = stats
+ maxsize = cache.maxsize
+ currsize = cache.currsize
+ return CacheInfo(hits, misses, maxsize, currsize)
+
+ def cache_clear():
+ with lock:
+ cache.clear()
+
+ wrapper.cache_info = cache_info
+ wrapper.cache_clear = cache_clear
+ return functools.update_wrapper(wrapper, func)
+
+ return decorator
+
+
+def rr_cache(maxsize=128, typed=False, getsizeof=None, lock=RLock):
+ """Decorator to wrap a function with a memoizing callable that saves
+ up to `maxsize` results based on a Random Replacement (RR)
+ algorithm.
+
+ """
+ makekey = _makekey_typed if typed else _makekey
+ return _cachedfunc(RRCache(maxsize, getsizeof), makekey, lock())
+
+
+def lfu_cache(maxsize=128, typed=False, getsizeof=None, lock=RLock):
+ """Decorator to wrap a function with a memoizing callable that saves
+ up to `maxsize` results based on a Least Frequently Used (LFU)
+ algorithm.
+
+ """
+ makekey = _makekey_typed if typed else _makekey
+ return _cachedfunc(LFUCache(maxsize, getsizeof), makekey, lock())
+
+
+def lru_cache(maxsize=128, typed=False, getsizeof=None, lock=RLock):
+ """Decorator to wrap a function with a memoizing callable that saves
+ up to `maxsize` results based on a Least Recently Used (LRU)
+ algorithm.
+
+ """
+ makekey = _makekey_typed if typed else _makekey
+ return _cachedfunc(LRUCache(maxsize, getsizeof), makekey, lock())
+
+
+def cachedmethod(cache, typed=False):
+ """Decorator to wrap a class or instance method with a memoizing
+ callable that saves results in a (possibly shared) cache.
+
+ """
+ makekey = _makekey_typed if typed else _makekey
+
+ def decorator(method):
+ def wrapper(self, *args, **kwargs):
+ # TODO: `shared`, locking...
+ key = makekey((method,) + args, kwargs)
+ mapping = cache(self)
+ try:
+ return mapping[key]
+ except KeyError:
+ pass
+ result = method(self, *args, **kwargs)
+ mapping[key] = result
+ return result
+
+ return functools.update_wrapper(wrapper, method)
+
+ return decorator
diff --git a/cachetools/lfucache.py b/cachetools/lfucache.py
new file mode 100644
index 0000000..9ea66d1
--- /dev/null
+++ b/cachetools/lfucache.py
@@ -0,0 +1,35 @@
+from .cache import Cache
+
+import operator
+
+
+class LFUCache(Cache):
+ """Least Frequently Used (LFU) cache implementation.
+
+ This class counts how often an item is retrieved, and discards the
+ items used least often to make space when necessary.
+
+ """
+
+ def __init__(self, maxsize, getsizeof=None):
+ if getsizeof is not None:
+ Cache.__init__(self, maxsize, lambda e: getsizeof(e[0]))
+ else:
+ Cache.__init__(self, maxsize)
+
+ def __getitem__(self, key, cache_getitem=Cache.__getitem__):
+ entry = cache_getitem(self, key)
+ entry[1] += 1
+ return entry[0]
+
+ def __setitem__(self, key, value, cache_setitem=Cache.__setitem__):
+ cache_setitem(self, key, [value, 0])
+
+ def popitem(self):
+ """Remove and return the `(key, value)` pair least frequently used."""
+ items = ((key, Cache.__getitem__(self, key)[1]) for key in self)
+ try:
+ key, _ = min(items, key=operator.itemgetter(1))
+ except ValueError:
+ raise KeyError('cache is empty')
+ return (key, self.pop(key))
diff --git a/cachetools/link.py b/cachetools/link.py
new file mode 100644
index 0000000..122735c
--- /dev/null
+++ b/cachetools/link.py
@@ -0,0 +1,10 @@
+class Link(object):
+ __slots__ = 'prev', 'next', 'data'
+
+ def unlink(self):
+ next = self.next
+ prev = self.prev
+ prev.next = next
+ next.prev = prev
+ del self.next
+ del self.prev
diff --git a/cachetools/lrucache.py b/cachetools/lrucache.py
new file mode 100644
index 0000000..e815545
--- /dev/null
+++ b/cachetools/lrucache.py
@@ -0,0 +1,72 @@
+from .cache import Cache
+from .link import Link
+
+
+class LRUCache(Cache):
+ """Least Recently Used (LRU) cache implementation.
+
+ This class discards the least recently used items first to make
+ space when necessary.
+
+ """
+
+ def __init__(self, maxsize, getsizeof=None):
+ if getsizeof is not None:
+ Cache.__init__(self, maxsize, lambda e: getsizeof(e[0]))
+ else:
+ Cache.__init__(self, maxsize)
+ root = Link()
+ root.prev = root.next = root
+ self.__root = root
+
+ def __repr__(self, cache_getitem=Cache.__getitem__):
+ return '%s(%r, maxsize=%d, currsize=%d)' % (
+ self.__class__.__name__,
+ [(key, cache_getitem(self, key)[0]) for key in self],
+ self.maxsize,
+ self.currsize,
+ )
+
+ def __getitem__(self, key, cache_getitem=Cache.__getitem__):
+ value, link = cache_getitem(self, key)
+ root = self.__root
+ link.prev.next = link.next
+ link.next.prev = link.prev
+ link.prev = tail = root.prev
+ link.next = root
+ tail.next = root.prev = link
+ return value
+
+ def __setitem__(self, key, value,
+ cache_getitem=Cache.__getitem__,
+ cache_setitem=Cache.__setitem__):
+ try:
+ _, link = cache_getitem(self, key)
+ except KeyError:
+ link = Link()
+ cache_setitem(self, key, (value, link))
+ try:
+ link.prev.next = link.next
+ link.next.prev = link.prev
+ except AttributeError:
+ link.data = key
+ root = self.__root
+ link.prev = tail = root.prev
+ link.next = root
+ tail.next = root.prev = link
+
+ def __delitem__(self, key,
+ cache_getitem=Cache.__getitem__,
+ cache_delitem=Cache.__delitem__):
+ _, link = cache_getitem(self, key)
+ cache_delitem(self, key)
+ link.unlink()
+
+ def popitem(self):
+ """Remove and return the `(key, value)` pair least recently used."""
+ root = self.__root
+ link = root.next
+ if link is root:
+ raise KeyError('cache is empty')
+ key = link.data
+ return (key, self.pop(key))
diff --git a/cachetools/rrcache.py b/cachetools/rrcache.py
new file mode 100644
index 0000000..3042a4e
--- /dev/null
+++ b/cachetools/rrcache.py
@@ -0,0 +1,20 @@
+from .cache import Cache
+
+import random
+
+
+class RRCache(Cache):
+ """Random Replacement (RR) cache implementation.
+
+ This class randomly selects candidate items and discards them to
+ make space when necessary.
+
+ """
+
+ def popitem(self):
+ """Remove and return a random `(key, value)` pair."""
+ try:
+ key = random.choice(list(self))
+ except IndexError:
+ raise KeyError('cache is empty')
+ return (key, self.pop(key))
diff --git a/cachetools/ttlcache.py b/cachetools/ttlcache.py
new file mode 100644
index 0000000..bb38f2b
--- /dev/null
+++ b/cachetools/ttlcache.py
@@ -0,0 +1,106 @@
+from .lrucache import LRUCache
+from .link import Link
+
+import time
+
+_marker = object()
+
+
+class TTLCache(LRUCache):
+ """Cache implementation with per-item time-to-live (TTL) value.
+
+ This class associates a time-to-live value with each item. Items
+ that expire because they have exceeded their time-to-live will be
+ removed automatically. If no expired items are there to remove,
+ the least recently used items will be discarded first to make
+ space when necessary.
+
+ """
+
+ def __init__(self, maxsize, ttl, timer=time.time, getsizeof=None):
+ if getsizeof is not None:
+ LRUCache.__init__(self, maxsize, lambda e: getsizeof(e[0]))
+ else:
+ LRUCache.__init__(self, maxsize)
+ root = Link()
+ root.prev = root.next = root
+ self.__root = root
+ self.__timer = timer
+ self.__ttl = ttl
+
+ def __repr__(self, cache_getitem=LRUCache.__getitem__):
+ return '%s(%r, maxsize=%d, currsize=%d)' % (
+ self.__class__.__name__,
+ [(key, cache_getitem(self, key)[0]) for key in self],
+ self.maxsize,
+ self.currsize,
+ )
+
+ def __getitem__(self, key, cache_getitem=LRUCache.__getitem__):
+ value, link = cache_getitem(self, key)
+ if link.data[1] < self.__timer():
+ raise KeyError('%r has expired' % key)
+ return value
+
+ def __setitem__(self, key, value,
+ cache_getitem=LRUCache.__getitem__,
+ cache_setitem=LRUCache.__setitem__,
+ cache_delitem=LRUCache.__delitem__):
+ time = self.__timer()
+ self.expire(time)
+ try:
+ _, link = cache_getitem(self, key)
+ except KeyError:
+ link = Link()
+ cache_setitem(self, key, (value, link))
+ try:
+ link.prev.next = link.next
+ link.next.prev = link.prev
+ except AttributeError:
+ pass
+ root = self.__root
+ link.data = (key, time + self.__ttl)
+ link.prev = tail = root.prev
+ link.next = root
+ tail.next = root.prev = link
+
+ def __delitem__(self, key,
+ cache_getitem=LRUCache.__getitem__,
+ cache_delitem=LRUCache.__delitem__):
+ _, link = cache_getitem(self, key)
+ cache_delitem(self, key)
+ link.unlink()
+ self.expire()
+
+ @property
+ def ttl(self):
+ """Return the time-to-live of the cache."""
+ return self.__ttl
+
+ @property
+ def timer(self):
+ """Return the timer used by the cache."""
+ return self.__timer
+
+ def pop(self, key, default=_marker):
+ try:
+ value, link = LRUCache.__getitem__(self, key)
+ except KeyError:
+ if default is _marker:
+ raise
+ return default
+ LRUCache.__delitem__(self, key)
+ link.unlink()
+ self.expire()
+ return value
+
+ def expire(self, time=None, cache_delitem=LRUCache.__delitem__):
+ """Remove items from the cache whose time-to-live ends before `time`."""
+ if time is None:
+ time = self.__timer()
+ root = self.__root
+ head = root.next
+ while head is not root and head.data[1] < time:
+ cache_delitem(self, head.data[0])
+ head.next.prev = root
+ head = root.next = head.next
diff --git a/docs/index.rst b/docs/index.rst
index 93dc71c..66e27fb 100644
--- a/docs/index.rst
+++ b/docs/index.rst
@@ -52,9 +52,9 @@ current size of the cache.
returns the size of a given item and may be overridden by subclasses.
The default implementation of :meth:`getsizeof` returns :const:`1`
irrespective of its `value` argument. For convenience, all cache
-classes also accept an optional constructor parameter `getsizeof`,
-that may specify a function of one argument used to extract the size
-of an item's value instead of the class' :meth:`getsizeof` method.
+classes also accept an optional named constructor parameter
+`getsizeof`, that may specify a function of one argument used to
+retrieve the size of an item's value.
.. autoclass:: Cache
@@ -73,8 +73,8 @@ of an item's value instead of the class' :meth:`getsizeof` method.
:members:
Note that a cache item may expire at *any* time, so iterating over
- the items of a :class:`TTLCache` may raise :class:`KeyError` or
- :class:`RuntimeError` unexpectedly::
+ the items of a :class:`TTLCache` may raise :class:`KeyError`
+ unexpectedly::
from cachetools import TTLCache
import time
@@ -83,14 +83,11 @@ of an item's value instead of the class' :meth:`getsizeof` method.
cache.update({1: 1, 2: 2, 3: 3})
time.sleep(1)
- try:
- for key in cache:
- try:
- print(cache[key])
- except KeyError:
- print('Key %r has expired' % key)
- except RuntimeError as e:
- print(e)
+ for key in cache:
+ try:
+ print(cache[key])
+ except KeyError:
+ print('Key %r has expired' % key)
Function Decorators
diff --git a/setup.py b/setup.py
index ebe35c9..ef6ba8e 100644
--- a/setup.py
+++ b/setup.py
@@ -9,7 +9,7 @@ def get_version(filename):
setup(
name='cachetools',
- version=get_version('cachetools.py'),
+ version=get_version('cachetools/__init__.py'),
author='Thomas Kemmer',
author_email='tkemmer@computer.org',
url='https://github.com/tkem/cachetools',
@@ -31,6 +31,6 @@ setup(
'Programming Language :: Python :: 3.4',
'Topic :: Software Development :: Libraries :: Python Modules'
],
- py_modules=['cachetools'],
+ packages=['cachetools'],
test_suite='tests'
)
diff --git a/tests/__init__.py b/tests/__init__.py
index 4ab4a11..7048bfd 100644
--- a/tests/__init__.py
+++ b/tests/__init__.py
@@ -124,3 +124,53 @@ class CacheTestMixin(object):
self.assertEqual(1, len(cache))
self.assertEqual(3, cache.currsize)
self.assertEqual(3, cache[3])
+
+
+class LRUCacheTestMixin(CacheTestMixin):
+
+ def test_lru_insert(self):
+ cache = self.make_cache(maxsize=2)
+
+ cache[1] = 1
+ cache[2] = 2
+ cache[3] = 3
+
+ self.assertEqual(len(cache), 2)
+ self.assertEqual(cache[2], 2)
+ self.assertEqual(cache[3], 3)
+ self.assertNotIn(1, cache)
+
+ cache[2]
+ cache[4] = 4
+ self.assertEqual(len(cache), 2)
+ self.assertEqual(cache[2], 2)
+ self.assertEqual(cache[4], 4)
+ self.assertNotIn(3, cache)
+
+ cache[5] = 5
+ self.assertEqual(len(cache), 2)
+ self.assertEqual(cache[4], 4)
+ self.assertEqual(cache[5], 5)
+ self.assertNotIn(2, cache)
+
+ def test_lru_getsizeof(self):
+ cache = self.make_cache(maxsize=3, getsizeof=lambda x: x)
+
+ cache[1] = 1
+ cache[2] = 2
+
+ self.assertEqual(len(cache), 2)
+ self.assertEqual(cache[1], 1)
+ self.assertEqual(cache[2], 2)
+
+ cache[3] = 3
+
+ self.assertEqual(len(cache), 1)
+ self.assertEqual(cache[3], 3)
+ self.assertNotIn(1, cache)
+ self.assertNotIn(2, cache)
+
+ with self.assertRaises(ValueError):
+ cache[4] = 4
+ self.assertEqual(len(cache), 1)
+ self.assertEqual(cache[3], 3)
diff --git a/tests/test_lrucache.py b/tests/test_lrucache.py
index 242a0e7..bdbb39d 100644
--- a/tests/test_lrucache.py
+++ b/tests/test_lrucache.py
@@ -1,6 +1,6 @@
import unittest
-from . import CacheTestMixin
+from . import LRUCacheTestMixin
from cachetools import LRUCache, lru_cache
@@ -14,58 +14,11 @@ def cached_typed(n):
return n
-class LRUCacheTest(unittest.TestCase, CacheTestMixin):
+class LRUCacheTest(unittest.TestCase, LRUCacheTestMixin):
def make_cache(self, maxsize, getsizeof=None):
return LRUCache(maxsize, getsizeof)
- def test_lru_insert(self):
- cache = self.make_cache(maxsize=2)
-
- cache[1] = 1
- cache[2] = 2
- cache[3] = 3
-
- self.assertEqual(len(cache), 2)
- self.assertEqual(cache[2], 2)
- self.assertEqual(cache[3], 3)
- self.assertNotIn(1, cache)
-
- cache[2]
- cache[4] = 4
- self.assertEqual(len(cache), 2)
- self.assertEqual(cache[2], 2)
- self.assertEqual(cache[4], 4)
- self.assertNotIn(3, cache)
-
- cache[5] = 5
- self.assertEqual(len(cache), 2)
- self.assertEqual(cache[4], 4)
- self.assertEqual(cache[5], 5)
- self.assertNotIn(2, cache)
-
- def test_lru_getsizeof(self):
- cache = self.make_cache(maxsize=3, getsizeof=lambda x: x)
-
- cache[1] = 1
- cache[2] = 2
-
- self.assertEqual(len(cache), 2)
- self.assertEqual(cache[1], 1)
- self.assertEqual(cache[2], 2)
-
- cache[3] = 3
-
- self.assertEqual(len(cache), 1)
- self.assertEqual(cache[3], 3)
- self.assertNotIn(1, cache)
- self.assertNotIn(2, cache)
-
- with self.assertRaises(ValueError):
- cache[4] = 4
- self.assertEqual(len(cache), 1)
- self.assertEqual(cache[3], 3)
-
def test_decorator(self):
self.assertEqual(cached.cache_info(), (0, 0, 2, 0))
self.assertEqual(cached(1), 1)
diff --git a/tests/test_ttlcache.py b/tests/test_ttlcache.py
index 5e89642..6bf2d5e 100644
--- a/tests/test_ttlcache.py
+++ b/tests/test_ttlcache.py
@@ -1,24 +1,81 @@
import unittest
-from . import CacheTestMixin
+from . import LRUCacheTestMixin
from cachetools import TTLCache
-class TTLCacheTest(unittest.TestCase, CacheTestMixin):
+class TTLCacheTest(unittest.TestCase, LRUCacheTestMixin):
- def make_cache(self, maxsize, getsizeof=None, ttl=86400):
- return TTLCache(maxsize, ttl, getsizeof)
+ def make_cache(self, maxsize, getsizeof=None):
+ return TTLCache(maxsize, ttl=0, timer=lambda: 0, getsizeof=getsizeof)
- def test_ttl(self):
- cache = self.make_cache(maxsize=2, ttl=0)
+ def make_ttl_cache(self, maxsize, ttl):
+ class Timer:
+ def __init__(self):
+ self.__time = 0
+
+ def __call__(self):
+ return self.__time
+
+ def inc(self):
+ self.__time = self.__time + 1
+
+ return TTLCache(maxsize, ttl, timer=Timer())
+
+ def test_ttl_insert(self):
+ cache = self.make_ttl_cache(maxsize=2, ttl=2)
+ self.assertEqual(cache.ttl, 2)
cache[1] = 1
+
+ self.assertEqual(1, len(cache))
+ self.assertEqual(1, cache[1])
+
+ cache.timer.inc()
cache[2] = 2
+
+ self.assertEqual(2, len(cache))
+ self.assertEqual(1, cache[1])
+ self.assertEqual(2, cache[2])
+
+ cache.timer.inc()
+ cache[1]
cache[3] = 3
+ self.assertEqual(2, len(cache))
+ self.assertEqual(1, cache[1])
+ self.assertNotIn(2, cache)
+ self.assertEqual(3, cache[3])
+
+ def test_ttl_expire(self):
+ cache = self.make_ttl_cache(maxsize=3, ttl=0)
+ self.assertEqual(cache.ttl, 0)
+
+ cache[1] = 1
+ self.assertEqual(1, cache[1])
+ cache.timer.inc()
with self.assertRaises(KeyError):
cache[1]
+ cache[2] = 2
+ self.assertEqual(2, cache[2])
+ cache.timer.inc()
with self.assertRaises(KeyError):
cache[2]
- with self.assertRaises(KeyError):
- cache[3]
+ cache[3] = 3
+ self.assertEqual(3, cache[3])
+
+ cache.expire(1)
+ self.assertNotIn(1, cache)
+ self.assertEqual(3, cache[3])
+
+ cache.expire(2)
+ self.assertNotIn(1, cache)
+ self.assertNotIn(2, cache)
+ self.assertEqual(3, cache[3])
+
+ cache.timer.inc()
+ cache.expire()
+ self.assertEqual(0, len(cache))
+ self.assertNotIn(1, cache)
+ self.assertNotIn(2, cache)
+ self.assertNotIn(3, cache)