"""Extensible memoizing collections and decorators"""

import collections
import functools
import operator
import random
import time

try:
    from collections.abc import MutableMapping  # Python >= 3.3
except ImportError:
    from collections import MutableMapping  # Python 2

try:
    from threading import RLock
except ImportError:
    from dummy_threading import RLock

__version__ = '0.4.0'

_marker = object()  # sentinel: lets pop() tell "no default given" from default=None


class _Link(object):
    """Node of the doubly-linked lists that track item order in
    LRUCache and TTLCache."""

    __slots__ = 'prev', 'next', 'data'


class Cache(MutableMapping):
    """Mutable mapping to serve as a cache.

    This class discards arbitrary items using :meth:`popitem` to make
    space when necessary.  Derived classes may override
    :meth:`popitem` to implement specific caching strategies.

    """

    def __init__(self, maxsize, getsizeof=None):
        if getsizeof is not None:
            self.getsizeof = getsizeof
        self.__mapping = dict()  # maps each key to a (value, size) pair
        self.__maxsize = maxsize
        self.__currsize = 0

    def __getitem__(self, key):
        return self.__mapping[key][0]

    def __setitem__(self, key, value):
        mapping = self.__mapping
        maxsize = self.__maxsize
        size = self.getsizeof(value)
        if size > maxsize:
            raise ValueError('value too large')
        # Evict items until the new value fits.  Note that popitem() may
        # evict the very key being updated, so membership is re-checked.
        if key not in mapping or mapping[key][1] < size:
            while self.__currsize + size > maxsize:
                self.popitem()
        if key in mapping:
            self.__currsize -= mapping[key][1]
        mapping[key] = (value, size)
        self.__currsize += size

    def __delitem__(self, key):
        _, size = self.__mapping.pop(key)
        self.__currsize -= size

    def __iter__(self):
        return iter(self.__mapping)

    def __len__(self):
        return len(self.__mapping)

    def __repr__(self):
        # Note: iterating items() goes through __getitem__, so subclasses
        # with access side effects (LRU order, LFU counts) are perturbed.
        return '%s(%r, maxsize=%d, currsize=%d)' % (
            self.__class__.__name__,
            list(self.items()),
            self.__maxsize,
            self.__currsize,
        )

    @property
    def maxsize(self):
        """Return the maximum size of the cache."""
        return self.__maxsize

    @property
    def currsize(self):
        """Return the current size of the cache."""
        return self.__currsize

    @staticmethod
    def getsizeof(value):
        """Return the size of a cache element."""
        return 1
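

# Illustrative usage sketch (not part of the original module; the helper name
# _demo_cache is hypothetical): a Cache with a custom getsizeof evicts
# arbitrary items once the total size would exceed maxsize.
def _demo_cache():
    cache = Cache(maxsize=10, getsizeof=len)
    cache['a'] = 'spam'   # size 4, currsize 4
    cache['b'] = 'eggs'   # size 4, currsize 8
    cache['c'] = 'bacon'  # size 5 would exceed 10, so an item is evicted
    assert cache.currsize <= 10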


class RRCache(Cache):
    """Random Replacement (RR) cache implementation.

    This cache randomly selects candidate items and discards them to
    make space when necessary.

    """

    def popitem(self):
        """Remove and return a random `(key, value)` pair."""
        try:
            key = random.choice(list(self))
        except IndexError:
            raise KeyError('cache is empty')
        return (key, self.pop(key))
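

# Illustrative sketch (hypothetical helper, not part of the original module):
# RRCache evicts a randomly chosen item when space is needed.
def _demo_rr():
    cache = RRCache(maxsize=2)
    cache['a'] = 1
    cache['b'] = 2
    cache['c'] = 3  # evicts either 'a' or 'b' at random
    assert len(cache) == 2 and 'c' in cache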


class LFUCache(Cache):
    """Least Frequently Used (LFU) cache implementation.

    This cache counts how often an item is retrieved, and discards the
    items used least often to make space when necessary.

    """

    def __init__(self, maxsize, getsizeof=None):
        if getsizeof is not None:
            Cache.__init__(self, maxsize, lambda e: getsizeof(e[0]))
        else:
            Cache.__init__(self, maxsize)

    # Binding the base-class methods as default arguments resolves them once
    # at definition time instead of on every call.
    def __getitem__(self, key, cache_getitem=Cache.__getitem__):
        entry = cache_getitem(self, key)
        entry[1] += 1  # one more retrieval of this key
        return entry[0]

    def __setitem__(self, key, value, cache_setitem=Cache.__setitem__):
        cache_setitem(self, key, [value, 0])

    def popitem(self):
        """Remove and return the `(key, value)` pair least frequently used."""
        items = ((key, Cache.__getitem__(self, key)[1]) for key in self)
        try:
            key, _ = min(items, key=operator.itemgetter(1))
        except ValueError:
            raise KeyError('cache is empty')
        return (key, self.pop(key))
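

# Illustrative sketch (hypothetical helper, not part of the original module):
# LFUCache counts retrievals and evicts the least frequently used key first.
def _demo_lfu():
    cache = LFUCache(maxsize=2)
    cache['a'] = 1
    cache['b'] = 2
    cache['a']      # 'a' has now been retrieved once, 'b' never
    cache['c'] = 3  # evicts 'b', the least frequently used key
    assert 'b' not in cache and 'a' in cache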


class LRUCache(Cache):
    """Least Recently Used (LRU) cache implementation.

    This cache discards the least recently used items first to make
    space when necessary.

    """

    def __init__(self, maxsize, getsizeof=None):
        if getsizeof is not None:
            Cache.__init__(self, maxsize, lambda e: getsizeof(e[0]))
        else:
            Cache.__init__(self, maxsize)
        root = _Link()
        root.prev = root.next = root
        self.__root = root

    def __getitem__(self, key, cache_getitem=Cache.__getitem__):
        value, link = cache_getitem(self, key)
        root = self.__root
        link.prev.next = link.next
        link.next.prev = link.prev
        link.prev = tail = root.prev
        link.next = root
        tail.next = root.prev = link
        return value

    def __setitem__(self, key, value,
                    cache_getitem=Cache.__getitem__,
                    cache_setitem=Cache.__setitem__):
        try:
            _, link = cache_getitem(self, key)
        except KeyError:
            link = _Link()
        cache_setitem(self, key, (value, link))
        try:
            link.prev.next = link.next
            link.next.prev = link.prev
        except AttributeError:
            link.data = key
        root = self.__root
        link.prev = tail = root.prev
        link.next = root
        tail.next = root.prev = link

    def __delitem__(self, key,
                    cache_getitem=Cache.__getitem__,
                    cache_delitem=Cache.__delitem__):
        _, link = cache_getitem(self, key)
        cache_delitem(self, key)
        link.prev.next = link.next
        link.next.prev = link.prev
        del link.next
        del link.prev

    def popitem(self):
        """Remove and return the `(key, value)` pair least recently used."""
        root = self.__root
        link = root.next
        if link is root:
            raise KeyError('cache is empty')
        key = link.data
        return (key, self.pop(key))
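

# Illustrative sketch (hypothetical helper, not part of the original module):
# LRUCache evicts the least recently *used* key; a lookup refreshes an item.
def _demo_lru():
    cache = LRUCache(maxsize=2)
    cache['a'] = 1
    cache['b'] = 2
    cache['a']      # 'a' becomes the most recently used item
    cache['c'] = 3  # evicts 'b', the least recently used item
    assert 'b' not in cache and 'a' in cache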


class TTLCache(LRUCache):
    """LRU cache implementation with per-item time-to-live (TTL) value.

    This least-recently-used cache associates a time-to-live value
    with each item.  Items that expire because they have exceeded
    their time-to-live are removed from the cache automatically.

    """

    def __init__(self, maxsize, ttl, getsizeof=None, timer=time.time):
        if getsizeof is not None:
            LRUCache.__init__(self, maxsize, lambda e: getsizeof(e[0]))
        else:
            LRUCache.__init__(self, maxsize)
        root = _Link()
        root.prev = root.next = root
        self.__root = root
        self.__timer = timer
        self.__ttl = ttl

    def __getitem__(self, key,
                    cache_getitem=LRUCache.__getitem__,
                    cache_delitem=LRUCache.__delitem__):
        value, link = cache_getitem(self, key)
        if self.__timer() < link.data[1]:
            return value
        root = self.__root
        head = root.next
        link = link.next
        while head is not link:
            cache_delitem(self, head.data[0])
            head.next.prev = root
            head = root.next = head.next
        raise KeyError('%r has expired' % key)

    def __setitem__(self, key, value,
                    cache_getitem=LRUCache.__getitem__,
                    cache_setitem=LRUCache.__setitem__,
                    cache_delitem=LRUCache.__delitem__):
        root = self.__root
        head = root.next
        now = self.__timer()  # local name 'now' avoids shadowing the time module
        while head is not root and head.data[1] < now:
            cache_delitem(self, head.data[0])
            head.next.prev = root
            head = root.next = head.next
        try:
            _, link = cache_getitem(self, key)
        except KeyError:
            link = _Link()
        cache_setitem(self, key, (value, link))
        try:
            link.prev.next = link.next
            link.next.prev = link.prev
        except AttributeError:
            pass
        link.data = (key, now + self.__ttl)
        link.prev = tail = root.prev
        link.next = root
        tail.next = root.prev = link

    def __delitem__(self, key,
                    cache_getitem=LRUCache.__getitem__,
                    cache_delitem=LRUCache.__delitem__):
        _, link = cache_getitem(self, key)
        cache_delitem(self, key)
        link.prev.next = link.next
        link.next.prev = link.prev

    def __repr__(self, cache_getitem=LRUCache.__getitem__):
        return '%s(%r, maxsize=%d, currsize=%d)' % (
            self.__class__.__name__,
            [(key, cache_getitem(self, key)[0]) for key in self],
            self.maxsize,
            self.currsize,
        )

    def pop(self, key, default=_marker):
        try:
            value, link = LRUCache.__getitem__(self, key)
        except KeyError:
            if default is _marker:
                raise
            else:
                return default
        LRUCache.__delitem__(self, key)
        link.prev.next = link.next
        link.next.prev = link.prev
        del link.next
        del link.prev
        return value
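

# Illustrative sketch (hypothetical helper, not part of the original module):
# with an injectable timer, TTLCache expiry can be shown deterministically.
def _demo_ttl():
    clock = [0]
    cache = TTLCache(maxsize=8, ttl=10, timer=lambda: clock[0])
    cache['a'] = 1
    clock[0] = 5
    assert cache['a'] == 1  # still within the 10-unit time-to-live
    clock[0] = 11           # past the item's expiry time
    try:
        cache['a']
        assert False, 'expected KeyError'
    except KeyError:
        pass                # expired items raise KeyError on access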


CacheInfo = collections.namedtuple('CacheInfo', 'hits misses maxsize currsize')


def _makekey(args, kwargs):
    return (args, tuple(sorted(kwargs.items())))


def _makekey_typed(args, kwargs):
    key = _makekey(args, kwargs)
    key += tuple(type(v) for v in args)
    key += tuple(type(v) for k, v in sorted(kwargs.items()))
    return key
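

# Illustrative sketch (hypothetical helper, not part of the original module):
# typed keys distinguish argument types, so f(1) and f(1.0) cache separately.
def _demo_makekey():
    assert _makekey((1,), {}) == _makekey((1.0,), {})
    assert _makekey_typed((1,), {}) != _makekey_typed((1.0,), {})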


def _cachedfunc(cache, makekey, lock):
    def decorator(func):
        stats = [0, 0]

        def wrapper(*args, **kwargs):
            key = makekey(args, kwargs)
            with lock:
                try:
                    result = cache[key]
                    stats[0] += 1
                    return result
                except KeyError:
                    stats[1] += 1
            # The wrapped function runs outside the lock, so a slow call does
            # not block other cache users (concurrent misses may compute twice).
            result = func(*args, **kwargs)
            with lock:
                cache[key] = result
            return result

        def cache_info():
            with lock:
                hits, misses = stats
                maxsize = cache.maxsize
                currsize = cache.currsize
            return CacheInfo(hits, misses, maxsize, currsize)

        def cache_clear():
            with lock:
                cache.clear()

        wrapper.cache_info = cache_info
        wrapper.cache_clear = cache_clear
        return functools.update_wrapper(wrapper, func)

    return decorator


def _cachedmeth(getcache, makekey):
    def decorator(func):
        def wrapper(self, *args, **kwargs):
            key = makekey((func,) + args, kwargs)
            cache = getcache(self)
            try:
                return cache[key]
            except KeyError:
                pass
            result = func(self, *args, **kwargs)
            cache[key] = result
            return result

        return functools.update_wrapper(wrapper, func)

    return decorator


def lru_cache(maxsize=128, typed=False, getsizeof=None, lock=RLock):
    """Decorator to wrap a function with a memoizing callable that saves
    up to `maxsize` results based on a Least Recently Used (LRU)
    algorithm.

    """
    makekey = _makekey_typed if typed else _makekey
    return _cachedfunc(LRUCache(maxsize, getsizeof), makekey, lock())
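

# Illustrative sketch (hypothetical helper, not part of the original module):
# lru_cache mirrors the interface of functools.lru_cache, including
# cache_info() and cache_clear().
def _demo_lru_cache():
    @lru_cache(maxsize=32)
    def fib(n):
        return n if n < 2 else fib(n - 1) + fib(n - 2)

    fib(16)
    info = fib.cache_info()
    assert info.misses == 17      # fib(0) .. fib(16) each computed once
    fib.cache_clear()
    assert fib.cache_info().currsize == 0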


def lfu_cache(maxsize=128, typed=False, getsizeof=None, lock=RLock):
    """Decorator to wrap a function with a memoizing callable that saves
    up to `maxsize` results based on a Least Frequently Used (LFU)
    algorithm.

    """
    makekey = _makekey_typed if typed else _makekey
    return _cachedfunc(LFUCache(maxsize, getsizeof), makekey, lock())


def rr_cache(maxsize=128, typed=False, getsizeof=None, lock=RLock):
    """Decorator to wrap a function with a memoizing callable that saves
    up to `maxsize` results based on a Random Replacement (RR)
    algorithm.

    """
    makekey = _makekey_typed if typed else _makekey
    return _cachedfunc(RRCache(maxsize, getsizeof), makekey, lock())


def cachedmethod(cache, typed=False):
    """Decorator to wrap a class or instance method with a memoizing
    callable that saves results in a (possibly shared) cache.

    `cache` must be a callable that, given an instance, returns the
    cache object to use, e.g. ``operator.attrgetter('cache')``.

    """
    makekey = _makekey_typed if typed else _makekey
    return _cachedmeth(cache, makekey)
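

# Illustrative sketch (hypothetical helper, not part of the original module):
# cachedmethod takes a callable that maps an instance to its cache, e.g.
# operator.attrgetter('cache'), so several methods can share one cache.
def _demo_cachedmethod():
    class Spam(object):
        def __init__(self):
            self.cache = LRUCache(maxsize=16)

        @cachedmethod(operator.attrgetter('cache'))
        def squared(self, n):
            return n * n

    spam = Spam()
    assert spam.squared(3) == 9
    assert spam.squared(3) == 9  # second call is served from spam.cache
    assert spam.cache.currsize == 1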