Diffstat (limited to 'src')
-rw-r--r--  src/cachetools/__init__.py  596
-rw-r--r--  src/cachetools/cache.py       9
-rw-r--r--  src/cachetools/fifo.py        9
-rw-r--r--  src/cachetools/func.py      171
-rw-r--r--  src/cachetools/keys.py       52
-rw-r--r--  src/cachetools/lfu.py         9
-rw-r--r--  src/cachetools/lru.py         9
-rw-r--r--  src/cachetools/mru.py         9
-rw-r--r--  src/cachetools/rr.py          9
-rw-r--r--  src/cachetools/ttl.py         9
10 files changed, 882 insertions, 0 deletions
diff --git a/src/cachetools/__init__.py b/src/cachetools/__init__.py
new file mode 100644
index 0000000..42822f0
--- /dev/null
+++ b/src/cachetools/__init__.py
@@ -0,0 +1,596 @@
+"""Extensible memoizing collections and decorators."""
+
+__all__ = (
+ "Cache",
+ "FIFOCache",
+ "LFUCache",
+ "LRUCache",
+ "MRUCache",
+ "RRCache",
+ "TTLCache",
+ "cached",
+ "cachedmethod",
+)
+
+__version__ = "4.2.4"
+
+import collections
+import collections.abc
+import functools
+import random
+import time
+
+from .keys import hashkey
+
+
+class _DefaultSize:
+
+ __slots__ = ()
+
+ def __getitem__(self, _):
+ return 1
+
+ def __setitem__(self, _, value):
+ assert value == 1
+
+ def pop(self, _):
+ return 1
+
+
+class Cache(collections.abc.MutableMapping):
+ """Mutable mapping to serve as a simple cache or cache base class."""
+
+ __marker = object()
+
+ __size = _DefaultSize()
+
+ def __init__(self, maxsize, getsizeof=None):
+ if getsizeof:
+ self.getsizeof = getsizeof
+ if self.getsizeof is not Cache.getsizeof:
+ self.__size = dict()
+ self.__data = dict()
+ self.__currsize = 0
+ self.__maxsize = maxsize
+
+ def __repr__(self):
+ return "%s(%r, maxsize=%r, currsize=%r)" % (
+ self.__class__.__name__,
+ list(self.__data.items()),
+ self.__maxsize,
+ self.__currsize,
+ )
+
+ def __getitem__(self, key):
+ try:
+ return self.__data[key]
+ except KeyError:
+ return self.__missing__(key)
+
+ def __setitem__(self, key, value):
+ maxsize = self.__maxsize
+ size = self.getsizeof(value)
+ if size > maxsize:
+ raise ValueError("value too large")
+ if key not in self.__data or self.__size[key] < size:
+ while self.__currsize + size > maxsize:
+ self.popitem()
+ if key in self.__data:
+ diffsize = size - self.__size[key]
+ else:
+ diffsize = size
+ self.__data[key] = value
+ self.__size[key] = size
+ self.__currsize += diffsize
+
+ def __delitem__(self, key):
+ size = self.__size.pop(key)
+ del self.__data[key]
+ self.__currsize -= size
+
+ def __contains__(self, key):
+ return key in self.__data
+
+ def __missing__(self, key):
+ raise KeyError(key)
+
+ def __iter__(self):
+ return iter(self.__data)
+
+ def __len__(self):
+ return len(self.__data)
+
+ def get(self, key, default=None):
+ if key in self:
+ return self[key]
+ else:
+ return default
+
+ def pop(self, key, default=__marker):
+ if key in self:
+ value = self[key]
+ del self[key]
+ elif default is self.__marker:
+ raise KeyError(key)
+ else:
+ value = default
+ return value
+
+ def setdefault(self, key, default=None):
+ if key in self:
+ value = self[key]
+ else:
+ self[key] = value = default
+ return value
+
+ @property
+ def maxsize(self):
+ """The maximum size of the cache."""
+ return self.__maxsize
+
+ @property
+ def currsize(self):
+ """The current size of the cache."""
+ return self.__currsize
+
+ @staticmethod
+ def getsizeof(value):
+ """Return the size of a cache element's value."""
+ return 1
+
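+# Usage sketch (illustrative): Cache is a bounded MutableMapping. With a
+# custom `getsizeof`, `currsize` tracks the total size of stored values
+# rather than the item count, and oversized values are rejected outright.
+#
+#     cache = Cache(maxsize=10, getsizeof=len)
+#     cache["greeting"] = "hello"    # currsize == 5
+#     cache["word"] = "x" * 11       # raises ValueError("value too large")
+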
+
+class FIFOCache(Cache):
+ """First In First Out (FIFO) cache implementation."""
+
+ def __init__(self, maxsize, getsizeof=None):
+ Cache.__init__(self, maxsize, getsizeof)
+ self.__order = collections.OrderedDict()
+
+ def __setitem__(self, key, value, cache_setitem=Cache.__setitem__):
+ cache_setitem(self, key, value)
+ try:
+ self.__order.move_to_end(key)
+ except KeyError:
+ self.__order[key] = None
+
+ def __delitem__(self, key, cache_delitem=Cache.__delitem__):
+ cache_delitem(self, key)
+ del self.__order[key]
+
+ def popitem(self):
+ """Remove and return the `(key, value)` pair first inserted."""
+ try:
+ key = next(iter(self.__order))
+ except StopIteration:
+ raise KeyError("%s is empty" % type(self).__name__) from None
+ else:
+ return (key, self.pop(key))
+
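+# Eviction sketch (illustrative): FIFOCache evicts in insertion order;
+# reads do not affect which entry is dropped next.
+#
+#     cache = FIFOCache(maxsize=2)
+#     cache["a"], cache["b"] = 1, 2
+#     cache["a"]                     # lookups leave the order untouched
+#     cache["c"] = 3                 # evicts "a", the first key inserted
+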
+
+class LFUCache(Cache):
+ """Least Frequently Used (LFU) cache implementation."""
+
+ def __init__(self, maxsize, getsizeof=None):
+ Cache.__init__(self, maxsize, getsizeof)
+ self.__counter = collections.Counter()
+
+ def __getitem__(self, key, cache_getitem=Cache.__getitem__):
+ value = cache_getitem(self, key)
+ if key in self: # __missing__ may not store item
+ self.__counter[key] -= 1
+ return value
+
+ def __setitem__(self, key, value, cache_setitem=Cache.__setitem__):
+ cache_setitem(self, key, value)
+ self.__counter[key] -= 1
+
+ def __delitem__(self, key, cache_delitem=Cache.__delitem__):
+ cache_delitem(self, key)
+ del self.__counter[key]
+
+ def popitem(self):
+ """Remove and return the `(key, value)` pair least frequently used."""
+ try:
+ ((key, _),) = self.__counter.most_common(1)
+ except ValueError:
+ raise KeyError("%s is empty" % type(self).__name__) from None
+ else:
+ return (key, self.pop(key))
+
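+# Eviction sketch (illustrative): LFUCache keeps negated use counts in a
+# Counter, so most_common(1) surfaces the least frequently used key.
+#
+#     cache = LFUCache(maxsize=2)
+#     cache["a"], cache["b"] = 1, 2
+#     cache["a"]                     # "a" is now used more often than "b"
+#     cache["c"] = 3                 # evicts "b", the least frequently used
+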
+
+class LRUCache(Cache):
+ """Least Recently Used (LRU) cache implementation."""
+
+ def __init__(self, maxsize, getsizeof=None):
+ Cache.__init__(self, maxsize, getsizeof)
+ self.__order = collections.OrderedDict()
+
+ def __getitem__(self, key, cache_getitem=Cache.__getitem__):
+ value = cache_getitem(self, key)
+ if key in self: # __missing__ may not store item
+ self.__update(key)
+ return value
+
+ def __setitem__(self, key, value, cache_setitem=Cache.__setitem__):
+ cache_setitem(self, key, value)
+ self.__update(key)
+
+ def __delitem__(self, key, cache_delitem=Cache.__delitem__):
+ cache_delitem(self, key)
+ del self.__order[key]
+
+ def popitem(self):
+ """Remove and return the `(key, value)` pair least recently used."""
+ try:
+ key = next(iter(self.__order))
+ except StopIteration:
+ raise KeyError("%s is empty" % type(self).__name__) from None
+ else:
+ return (key, self.pop(key))
+
+ def __update(self, key):
+ try:
+ self.__order.move_to_end(key)
+ except KeyError:
+ self.__order[key] = None
+
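+# Eviction sketch (illustrative): every read or write moves the key to the
+# end of the order, so popitem() drops the entry untouched the longest.
+#
+#     cache = LRUCache(maxsize=2)
+#     cache["a"], cache["b"] = 1, 2
+#     cache["a"]                     # "a" becomes the most recently used
+#     cache["c"] = 3                 # evicts "b", the least recently used
+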
+
+class MRUCache(Cache):
+ """Most Recently Used (MRU) cache implementation."""
+
+ def __init__(self, maxsize, getsizeof=None):
+ Cache.__init__(self, maxsize, getsizeof)
+ self.__order = collections.OrderedDict()
+
+ def __getitem__(self, key, cache_getitem=Cache.__getitem__):
+ value = cache_getitem(self, key)
+ if key in self: # __missing__ may not store item
+ self.__update(key)
+ return value
+
+ def __setitem__(self, key, value, cache_setitem=Cache.__setitem__):
+ cache_setitem(self, key, value)
+ self.__update(key)
+
+ def __delitem__(self, key, cache_delitem=Cache.__delitem__):
+ cache_delitem(self, key)
+ del self.__order[key]
+
+ def popitem(self):
+ """Remove and return the `(key, value)` pair most recently used."""
+ try:
+ key = next(iter(self.__order))
+ except StopIteration:
+ raise KeyError("%s is empty" % type(self).__name__) from None
+ else:
+ return (key, self.pop(key))
+
+ def __update(self, key):
+ try:
+ self.__order.move_to_end(key, last=False)
+ except KeyError:
+ self.__order[key] = None
+
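+# Eviction sketch (illustrative): MRUCache mirrors LRUCache by moving each
+# touched key to the front, so the most recently used entry goes first.
+#
+#     cache = MRUCache(maxsize=2)
+#     cache["a"], cache["b"] = 1, 2
+#     cache["c"] = 3                 # evicts "b", the most recently used
+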
+
+class RRCache(Cache):
+ """Random Replacement (RR) cache implementation."""
+
+ def __init__(self, maxsize, choice=random.choice, getsizeof=None):
+ Cache.__init__(self, maxsize, getsizeof)
+ self.__choice = choice
+
+ @property
+ def choice(self):
+ """The `choice` function used by the cache."""
+ return self.__choice
+
+ def popitem(self):
+ """Remove and return a random `(key, value)` pair."""
+ try:
+ key = self.__choice(list(self))
+ except IndexError:
+ raise KeyError("%s is empty" % type(self).__name__) from None
+ else:
+ return (key, self.pop(key))
+
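+# Usage sketch (illustrative): the pluggable `choice` function picks the
+# victim; a deterministic stand-in such as min() simplifies testing.
+#
+#     cache = RRCache(maxsize=2, choice=min)
+#     cache["a"], cache["b"] = 1, 2
+#     cache["c"] = 3                 # evicts min(["a", "b"]), i.e. "a"
+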
+
+class _Timer:
+ def __init__(self, timer):
+ self.__timer = timer
+ self.__nesting = 0
+
+ def __call__(self):
+ if self.__nesting == 0:
+ return self.__timer()
+ else:
+ return self.__time
+
+ def __enter__(self):
+ if self.__nesting == 0:
+ self.__time = time = self.__timer()
+ else:
+ time = self.__time
+ self.__nesting += 1
+ return time
+
+ def __exit__(self, *exc):
+ self.__nesting -= 1
+
+ def __reduce__(self):
+ return _Timer, (self.__timer,)
+
+ def __getattr__(self, name):
+ return getattr(self.__timer, name)
+
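+# Behavior sketch (illustrative): nested `with` blocks on a _Timer share a
+# single timestamp, so compound cache operations see one consistent "now".
+#
+#     timer = _Timer(time.monotonic)
+#     with timer as t1:
+#         with timer as t2:
+#             assert t1 == t2        # inner read reuses the frozen time
+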
+
+class _Link:
+
+ __slots__ = ("key", "expire", "next", "prev")
+
+ def __init__(self, key=None, expire=None):
+ self.key = key
+ self.expire = expire
+
+ def __reduce__(self):
+ return _Link, (self.key, self.expire)
+
+ def unlink(self):
+ next = self.next
+ prev = self.prev
+ prev.next = next
+ next.prev = prev
+
+
+class TTLCache(Cache):
+ """LRU Cache implementation with per-item time-to-live (TTL) value."""
+
+ def __init__(self, maxsize, ttl, timer=time.monotonic, getsizeof=None):
+ Cache.__init__(self, maxsize, getsizeof)
+ self.__root = root = _Link()
+ root.prev = root.next = root
+ self.__links = collections.OrderedDict()
+ self.__timer = _Timer(timer)
+ self.__ttl = ttl
+
+ def __contains__(self, key):
+ try:
+ link = self.__links[key] # no reordering
+ except KeyError:
+ return False
+ else:
+ return not (link.expire < self.__timer())
+
+ def __getitem__(self, key, cache_getitem=Cache.__getitem__):
+ try:
+ link = self.__getlink(key)
+ except KeyError:
+ expired = False
+ else:
+ expired = link.expire < self.__timer()
+ if expired:
+ return self.__missing__(key)
+ else:
+ return cache_getitem(self, key)
+
+ def __setitem__(self, key, value, cache_setitem=Cache.__setitem__):
+ with self.__timer as time:
+ self.expire(time)
+ cache_setitem(self, key, value)
+ try:
+ link = self.__getlink(key)
+ except KeyError:
+ self.__links[key] = link = _Link(key)
+ else:
+ link.unlink()
+ link.expire = time + self.__ttl
+ link.next = root = self.__root
+ link.prev = prev = root.prev
+ prev.next = root.prev = link
+
+ def __delitem__(self, key, cache_delitem=Cache.__delitem__):
+ cache_delitem(self, key)
+ link = self.__links.pop(key)
+ link.unlink()
+ if link.expire < self.__timer():
+ raise KeyError(key)
+
+ def __iter__(self):
+ root = self.__root
+ curr = root.next
+ while curr is not root:
+ # "freeze" time for iterator access
+ with self.__timer as time:
+ if not (curr.expire < time):
+ yield curr.key
+ curr = curr.next
+
+ def __len__(self):
+ root = self.__root
+ curr = root.next
+ time = self.__timer()
+ count = len(self.__links)
+ while curr is not root and curr.expire < time:
+ count -= 1
+ curr = curr.next
+ return count
+
+ def __setstate__(self, state):
+ self.__dict__.update(state)
+ root = self.__root
+ root.prev = root.next = root
+ for link in sorted(self.__links.values(), key=lambda obj: obj.expire):
+ link.next = root
+ link.prev = prev = root.prev
+ prev.next = root.prev = link
+ self.expire(self.__timer())
+
+ def __repr__(self, cache_repr=Cache.__repr__):
+ with self.__timer as time:
+ self.expire(time)
+ return cache_repr(self)
+
+ @property
+ def currsize(self):
+ with self.__timer as time:
+ self.expire(time)
+ return super().currsize
+
+ @property
+ def timer(self):
+ """The timer function used by the cache."""
+ return self.__timer
+
+ @property
+ def ttl(self):
+ """The time-to-live value of the cache's items."""
+ return self.__ttl
+
+ def expire(self, time=None):
+ """Remove expired items from the cache."""
+ if time is None:
+ time = self.__timer()
+ root = self.__root
+ curr = root.next
+ links = self.__links
+ cache_delitem = Cache.__delitem__
+ while curr is not root and curr.expire < time:
+ cache_delitem(self, curr.key)
+ del links[curr.key]
+ next = curr.next
+ curr.unlink()
+ curr = next
+
+ def clear(self):
+ with self.__timer as time:
+ self.expire(time)
+ Cache.clear(self)
+
+ def get(self, *args, **kwargs):
+ with self.__timer:
+ return Cache.get(self, *args, **kwargs)
+
+ def pop(self, *args, **kwargs):
+ with self.__timer:
+ return Cache.pop(self, *args, **kwargs)
+
+ def setdefault(self, *args, **kwargs):
+ with self.__timer:
+ return Cache.setdefault(self, *args, **kwargs)
+
+ def popitem(self):
+ """Remove and return the `(key, value)` pair least recently used that
+ has not already expired.
+
+ """
+ with self.__timer as time:
+ self.expire(time)
+ try:
+ key = next(iter(self.__links))
+ except StopIteration:
+ raise KeyError("%s is empty" % type(self).__name__) from None
+ else:
+ return (key, self.pop(key))
+
+ def __getlink(self, key):
+ value = self.__links[key]
+ self.__links.move_to_end(key)
+ return value
+
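+# Usage sketch (illustrative): any zero-argument callable can serve as the
+# timer, which makes expiry deterministic in tests.
+#
+#     clock = [0]
+#     cache = TTLCache(maxsize=10, ttl=5, timer=lambda: clock[0])
+#     cache["a"] = 1
+#     clock[0] += 6                  # advance past the 5-unit TTL
+#     assert "a" not in cache        # expired entries vanish from lookups
+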
+
+def cached(cache, key=hashkey, lock=None):
+ """Decorator to wrap a function with a memoizing callable that saves
+ results in a cache.
+
+ """
+
+ def decorator(func):
+ if cache is None:
+
+ def wrapper(*args, **kwargs):
+ return func(*args, **kwargs)
+
+ elif lock is None:
+
+ def wrapper(*args, **kwargs):
+ k = key(*args, **kwargs)
+ try:
+ return cache[k]
+ except KeyError:
+ pass # key not found
+ v = func(*args, **kwargs)
+ try:
+ cache[k] = v
+ except ValueError:
+ pass # value too large
+ return v
+
+ else:
+
+ def wrapper(*args, **kwargs):
+ k = key(*args, **kwargs)
+ try:
+ with lock:
+ return cache[k]
+ except KeyError:
+ pass # key not found
+ v = func(*args, **kwargs)
+ # in case of a race, prefer the item already in the cache
+ try:
+ with lock:
+ return cache.setdefault(k, v)
+ except ValueError:
+ return v # value too large
+
+ return functools.update_wrapper(wrapper, func)
+
+ return decorator
+
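+# Usage sketch (illustrative): any mutable mapping works as the cache, and
+# an optional lock guards it; the wrapped function itself runs unlocked.
+#
+#     import threading
+#
+#     @cached(cache=LRUCache(maxsize=32), lock=threading.Lock())
+#     def fib(n):
+#         return n if n < 2 else fib(n - 1) + fib(n - 2)
+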
+
+def cachedmethod(cache, key=hashkey, lock=None):
+ """Decorator to wrap a class or instance method with a memoizing
+ callable that saves results in a cache.
+
+ """
+
+ def decorator(method):
+ if lock is None:
+
+ def wrapper(self, *args, **kwargs):
+ c = cache(self)
+ if c is None:
+ return method(self, *args, **kwargs)
+ k = key(*args, **kwargs)
+ try:
+ return c[k]
+ except KeyError:
+ pass # key not found
+ v = method(self, *args, **kwargs)
+ try:
+ c[k] = v
+ except ValueError:
+ pass # value too large
+ return v
+
+ else:
+
+ def wrapper(self, *args, **kwargs):
+ c = cache(self)
+ if c is None:
+ return method(self, *args, **kwargs)
+ k = key(*args, **kwargs)
+ try:
+ with lock(self):
+ return c[k]
+ except KeyError:
+ pass # key not found
+ v = method(self, *args, **kwargs)
+ # in case of a race, prefer the item already in the cache
+ try:
+ with lock(self):
+ return c.setdefault(k, v)
+ except ValueError:
+ return v # value too large
+
+ return functools.update_wrapper(wrapper, method)
+
+ return decorator
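+
+# Usage sketch (illustrative, hypothetical class): `cache` is a callable
+# receiving `self`, so each instance can carry its own cache; returning
+# None disables caching for that instance.
+#
+#     import operator
+#
+#     class Resolver:
+#         def __init__(self):
+#             self.cache = LRUCache(maxsize=16)
+#
+#         @cachedmethod(operator.attrgetter("cache"))
+#         def lookup(self, name):
+#             ...  # some expensive computation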
diff --git a/src/cachetools/cache.py b/src/cachetools/cache.py
new file mode 100644
index 0000000..ee5269d
--- /dev/null
+++ b/src/cachetools/cache.py
@@ -0,0 +1,9 @@
+import warnings
+
+from . import Cache
+
+warnings.warn(
+ "cachetools.cache is deprecated, please use cachetools.Cache",
+ DeprecationWarning,
+ stacklevel=2,
+)
diff --git a/src/cachetools/fifo.py b/src/cachetools/fifo.py
new file mode 100644
index 0000000..a29b789
--- /dev/null
+++ b/src/cachetools/fifo.py
@@ -0,0 +1,9 @@
+import warnings
+
+from . import FIFOCache
+
+warnings.warn(
+ "cachetools.fifo is deprecated, please use cachetools.FIFOCache",
+ DeprecationWarning,
+ stacklevel=2,
+)
diff --git a/src/cachetools/func.py b/src/cachetools/func.py
new file mode 100644
index 0000000..01702c2
--- /dev/null
+++ b/src/cachetools/func.py
@@ -0,0 +1,171 @@
+"""`functools.lru_cache` compatible memoizing function decorators."""
+
+__all__ = ("fifo_cache", "lfu_cache", "lru_cache", "mru_cache", "rr_cache", "ttl_cache")
+
+import collections
+import functools
+import math
+import random
+import time
+
+try:
+ from threading import RLock
+except ImportError: # pragma: no cover
+ from dummy_threading import RLock
+
+from . import FIFOCache, LFUCache, LRUCache, MRUCache, RRCache, TTLCache
+from . import keys
+
+
+_CacheInfo = collections.namedtuple(
+ "CacheInfo", ["hits", "misses", "maxsize", "currsize"]
+)
+
+
+class _UnboundCache(dict):
+ @property
+ def maxsize(self):
+ return None
+
+ @property
+ def currsize(self):
+ return len(self)
+
+
+class _UnboundTTLCache(TTLCache):
+ def __init__(self, ttl, timer):
+ TTLCache.__init__(self, math.inf, ttl, timer)
+
+ @property
+ def maxsize(self):
+ return None
+
+
+def _cache(cache, typed):
+ maxsize = cache.maxsize
+
+ def decorator(func):
+ key = keys.typedkey if typed else keys.hashkey
+ lock = RLock()
+ stats = [0, 0]
+
+ def wrapper(*args, **kwargs):
+ k = key(*args, **kwargs)
+ with lock:
+ try:
+ v = cache[k]
+ stats[0] += 1
+ return v
+ except KeyError:
+ stats[1] += 1
+ v = func(*args, **kwargs)
+ # in case of a race, prefer the item already in the cache
+ try:
+ with lock:
+ return cache.setdefault(k, v)
+ except ValueError:
+ return v # value too large
+
+ def cache_info():
+ with lock:
+ hits, misses = stats
+ maxsize = cache.maxsize
+ currsize = cache.currsize
+ return _CacheInfo(hits, misses, maxsize, currsize)
+
+ def cache_clear():
+ with lock:
+ try:
+ cache.clear()
+ finally:
+ stats[:] = [0, 0]
+
+ wrapper.cache_info = cache_info
+ wrapper.cache_clear = cache_clear
+ wrapper.cache_parameters = lambda: {"maxsize": maxsize, "typed": typed}
+ functools.update_wrapper(wrapper, func)
+ return wrapper
+
+ return decorator
+
+
+def fifo_cache(maxsize=128, typed=False):
+ """Decorator to wrap a function with a memoizing callable that saves
+ up to `maxsize` results based on a First In First Out (FIFO)
+ algorithm.
+
+ """
+ if maxsize is None:
+ return _cache(_UnboundCache(), typed)
+ elif callable(maxsize):
+ return _cache(FIFOCache(128), typed)(maxsize)
+ else:
+ return _cache(FIFOCache(maxsize), typed)
+
+
+def lfu_cache(maxsize=128, typed=False):
+ """Decorator to wrap a function with a memoizing callable that saves
+ up to `maxsize` results based on a Least Frequently Used (LFU)
+ algorithm.
+
+ """
+ if maxsize is None:
+ return _cache(_UnboundCache(), typed)
+ elif callable(maxsize):
+ return _cache(LFUCache(128), typed)(maxsize)
+ else:
+ return _cache(LFUCache(maxsize), typed)
+
+
+def lru_cache(maxsize=128, typed=False):
+ """Decorator to wrap a function with a memoizing callable that saves
+ up to `maxsize` results based on a Least Recently Used (LRU)
+ algorithm.
+
+ """
+ if maxsize is None:
+ return _cache(_UnboundCache(), typed)
+ elif callable(maxsize):
+ return _cache(LRUCache(128), typed)(maxsize)
+ else:
+ return _cache(LRUCache(maxsize), typed)
+
+
+def mru_cache(maxsize=128, typed=False):
+ """Decorator to wrap a function with a memoizing callable that saves
+ up to `maxsize` results based on a Most Recently Used (MRU)
+ algorithm.
+ """
+ if maxsize is None:
+ return _cache(_UnboundCache(), typed)
+ elif callable(maxsize):
+ return _cache(MRUCache(128), typed)(maxsize)
+ else:
+ return _cache(MRUCache(maxsize), typed)
+
+
+def rr_cache(maxsize=128, choice=random.choice, typed=False):
+ """Decorator to wrap a function with a memoizing callable that saves
+ up to `maxsize` results based on a Random Replacement (RR)
+ algorithm.
+
+ """
+ if maxsize is None:
+ return _cache(_UnboundCache(), typed)
+ elif callable(maxsize):
+ return _cache(RRCache(128, choice), typed)(maxsize)
+ else:
+ return _cache(RRCache(maxsize, choice), typed)
+
+
+def ttl_cache(maxsize=128, ttl=600, timer=time.monotonic, typed=False):
+ """Decorator to wrap a function with a memoizing callable that saves
+ up to `maxsize` results based on a Least Recently Used (LRU)
+ algorithm with a per-item time-to-live (TTL) value.
+ """
+ if maxsize is None:
+ return _cache(_UnboundTTLCache(ttl, timer), typed)
+ elif callable(maxsize):
+ return _cache(TTLCache(128, ttl, timer), typed)(maxsize)
+ else:
+ return _cache(TTLCache(maxsize, ttl, timer), typed)
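+
+# Usage sketch (illustrative): these decorators mirror functools.lru_cache,
+# including bare use without parentheses (the callable(maxsize) branch) and
+# the cache_info()/cache_clear() helpers attached to the wrapper.
+#
+#     @ttl_cache(maxsize=64, ttl=30)
+#     def resolve(host):
+#         ...
+#
+#     resolve("example.org")
+#     resolve.cache_info()           # CacheInfo(hits=0, misses=1, ...)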
diff --git a/src/cachetools/keys.py b/src/cachetools/keys.py
new file mode 100644
index 0000000..13630a4
--- /dev/null
+++ b/src/cachetools/keys.py
@@ -0,0 +1,52 @@
+"""Key functions for memoizing decorators."""
+
+__all__ = ("hashkey", "typedkey")
+
+
+class _HashedTuple(tuple):
+ """A tuple that ensures that hash() will be called no more than once
+ per element, since cache decorators will hash the key multiple
+ times on a cache miss. See also _HashedSeq in the standard
+ library functools implementation.
+
+ """
+
+ __hashvalue = None
+
+ def __hash__(self, hash=tuple.__hash__):
+ hashvalue = self.__hashvalue
+ if hashvalue is None:
+ self.__hashvalue = hashvalue = hash(self)
+ return hashvalue
+
+ def __add__(self, other, add=tuple.__add__):
+ return _HashedTuple(add(self, other))
+
+ def __radd__(self, other, add=tuple.__add__):
+ return _HashedTuple(add(other, self))
+
+ def __getstate__(self):
+ return {}
+
+
+# used for separating keyword arguments; we do not use an object
+# instance here so identity is preserved when pickling/unpickling
+_kwmark = (_HashedTuple,)
+
+
+def hashkey(*args, **kwargs):
+ """Return a cache key for the specified hashable arguments."""
+
+ if kwargs:
+ return _HashedTuple(args + sum(sorted(kwargs.items()), _kwmark))
+ else:
+ return _HashedTuple(args)
+
+
+def typedkey(*args, **kwargs):
+ """Return a typed cache key for the specified hashable arguments."""
+
+ key = hashkey(*args, **kwargs)
+ key += tuple(type(v) for v in args)
+ key += tuple(type(v) for _, v in sorted(kwargs.items()))
+ return key
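+
+# Behavior sketch (illustrative): hashkey treats equal values of different
+# types as the same key, while typedkey keeps them distinct.
+#
+#     assert hashkey(1, 2) == hashkey(1.0, 2.0)
+#     assert typedkey(1, 2) != typedkey(1.0, 2.0)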
diff --git a/src/cachetools/lfu.py b/src/cachetools/lfu.py
new file mode 100644
index 0000000..5c9acef
--- /dev/null
+++ b/src/cachetools/lfu.py
@@ -0,0 +1,9 @@
+import warnings
+
+from . import LFUCache
+
+warnings.warn(
+ "cachetools.lfu is deprecated, please use cachetools.LFUCache",
+ DeprecationWarning,
+ stacklevel=2,
+)
diff --git a/src/cachetools/lru.py b/src/cachetools/lru.py
new file mode 100644
index 0000000..48ddb36
--- /dev/null
+++ b/src/cachetools/lru.py
@@ -0,0 +1,9 @@
+import warnings
+
+from . import LRUCache
+
+warnings.warn(
+ "cachetools.lru is deprecated, please use cachetools.LRUCache",
+ DeprecationWarning,
+ stacklevel=2,
+)
diff --git a/src/cachetools/mru.py b/src/cachetools/mru.py
new file mode 100644
index 0000000..a486dc4
--- /dev/null
+++ b/src/cachetools/mru.py
@@ -0,0 +1,9 @@
+import warnings
+
+from . import MRUCache
+
+warnings.warn(
+ "cachetools.mru is deprecated, please use cachetools.MRUCache",
+ DeprecationWarning,
+ stacklevel=2,
+)
diff --git a/src/cachetools/rr.py b/src/cachetools/rr.py
new file mode 100644
index 0000000..81331bc
--- /dev/null
+++ b/src/cachetools/rr.py
@@ -0,0 +1,9 @@
+import warnings
+
+from . import RRCache
+
+warnings.warn(
+ "cachetools.rr is deprecated, please use cachetools.RRCache",
+ DeprecationWarning,
+ stacklevel=2,
+)
diff --git a/src/cachetools/ttl.py b/src/cachetools/ttl.py
new file mode 100644
index 0000000..ef343da
--- /dev/null
+++ b/src/cachetools/ttl.py
@@ -0,0 +1,9 @@
+import warnings
+
+from . import TTLCache
+
+warnings.warn(
+ "cachetools.ttl is deprecated, please use cachetools.TTLCache",
+ DeprecationWarning,
+ stacklevel=2,
+)