"""`functools.lru_cache` compatible memoizing function decorators."""

import collections
import functools
import math
import random
import time

try:
    from threading import RLock
except ImportError:  # pragma: no cover
    from dummy_threading import RLock

from . import keys
from .fifo import FIFOCache
from .lfu import LFUCache
from .lru import LRUCache
from .mru import MRUCache
from .rr import RRCache
from .ttl import TTLCache

__all__ = ("fifo_cache", "lfu_cache", "lru_cache", "mru_cache", "rr_cache", "ttl_cache")


_CacheInfo = collections.namedtuple(
    "CacheInfo", ["hits", "misses", "maxsize", "currsize"]
)


class _UnboundCache(dict):
    @property
    def maxsize(self):
        return None

    @property
    def currsize(self):
        return len(self)


class _UnboundTTLCache(TTLCache):
    def __init__(self, ttl, timer):
        TTLCache.__init__(self, math.inf, ttl, timer)

    @property
    def maxsize(self):
        return None


def _cache(cache, typed):
    maxsize = cache.maxsize

    def decorator(func):
        key = keys.typedkey if typed else keys.hashkey
        lock = RLock()
        stats = [0, 0]  # [hits, misses]

        def wrapper(*args, **kwargs):
            k = key(*args, **kwargs)
            with lock:
                try:
                    v = cache[k]
                    stats[0] += 1
                    return v
                except KeyError:
                    stats[1] += 1
            v = func(*args, **kwargs)
            # in case of a race, prefer the item already in the cache
            try:
                with lock:
                    return cache.setdefault(k, v)
            except ValueError:
                return v  # value too large

        def cache_info():
            with lock:
                hits, misses = stats
                maxsize = cache.maxsize
                currsize = cache.currsize
            return _CacheInfo(hits, misses, maxsize, currsize)

        def cache_clear():
            with lock:
                try:
                    cache.clear()
                finally:
                    stats[:] = [0, 0]

        wrapper.cache_info = cache_info
        wrapper.cache_clear = cache_clear
        wrapper.cache_parameters = lambda: {"maxsize": maxsize, "typed": typed}
        functools.update_wrapper(wrapper, func)
        return wrapper

    return decorator
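
# `_cache` is the shared machinery behind all of the decorators below: it
# binds a cachetools cache instance to a `functools.lru_cache`-compatible
# wrapper.  An illustrative sketch of this module-internal helper (not
# public API):
#
#     cached_pow = _cache(LRUCache(32), typed=False)(pow)
#     cached_pow(2, 10)        # miss: computed, stored under hashkey(2, 10)
#     cached_pow(2, 10)        # hit: served from the cache
#     cached_pow.cache_info()  # CacheInfo(hits=1, misses=1, maxsize=32, currsize=1)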


def fifo_cache(maxsize=128, typed=False):
    """Decorator to wrap a function with a memoizing callable that saves
    up to `maxsize` results based on a First In First Out (FIFO)
    algorithm.

    """
    if maxsize is None:
        return _cache(_UnboundCache(), typed)
    elif callable(maxsize):
        # bare decorator usage: `@fifo_cache` without arguments passes the
        # decorated function itself as `maxsize`
        return _cache(FIFOCache(128), typed)(maxsize)
    else:
        return _cache(FIFOCache(maxsize), typed)
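
# Illustrative use of `fifo_cache` (hypothetical `square` function): when the
# cache is full, the item that was *inserted first* is discarded, no matter
# how recently or how often it was used.
#
#     @fifo_cache(maxsize=2)
#     def square(x):
#         return x * x
#
#     square(1); square(2)  # cache holds the results for 1 and 2
#     square(3)             # evicts the result for 1, the oldest insertion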


def lfu_cache(maxsize=128, typed=False):
    """Decorator to wrap a function with a memoizing callable that saves
    up to `maxsize` results based on a Least Frequently Used (LFU)
    algorithm.

    """
    if maxsize is None:
        return _cache(_UnboundCache(), typed)
    elif callable(maxsize):
        return _cache(LFUCache(128), typed)(maxsize)
    else:
        return _cache(LFUCache(maxsize), typed)
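
# Illustrative use of `lfu_cache` (hypothetical `square` function): when the
# cache is full, the item with the fewest recorded uses is discarded.
#
#     @lfu_cache(maxsize=2)
#     def square(x):
#         return x * x
#
#     square(1); square(1)  # the result for 1 is used twice
#     square(2)             # the result for 2 is used once
#     square(3)             # evicts the result for 2, the least frequently used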


def lru_cache(maxsize=128, typed=False):
    """Decorator to wrap a function with a memoizing callable that saves
    up to `maxsize` results based on a Least Recently Used (LRU)
    algorithm.

    """
    if maxsize is None:
        return _cache(_UnboundCache(), typed)
    elif callable(maxsize):
        return _cache(LRUCache(128), typed)(maxsize)
    else:
        return _cache(LRUCache(maxsize), typed)
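
# Illustrative use of `lru_cache` (hypothetical `square` function), mirroring
# `functools.lru_cache`: when the cache is full, the item that has gone
# unused the longest is discarded.
#
#     @lru_cache(maxsize=2)
#     def square(x):
#         return x * x
#
#     square(1); square(2); square(1)  # 1 is now the most recently used
#     square(3)                        # evicts the result for 2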


def mru_cache(maxsize=128, typed=False):
    """Decorator to wrap a function with a memoizing callable that saves
    up to `maxsize` results based on a Most Recently Used (MRU)
    algorithm.
    """
    if maxsize is None:
        return _cache(_UnboundCache(), typed)
    elif callable(maxsize):
        return _cache(MRUCache(128), typed)(maxsize)
    else:
        return _cache(MRUCache(maxsize), typed)
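
# Illustrative use of `mru_cache` (hypothetical `square` function): the
# inverse of LRU, discarding the *most* recently used item when full.
#
#     @mru_cache(maxsize=2)
#     def square(x):
#         return x * x
#
#     square(1); square(2)  # 2 is the most recently used
#     square(3)             # evicts the result for 2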


def rr_cache(maxsize=128, choice=random.choice, typed=False):
    """Decorator to wrap a function with a memoizing callable that saves
    up to `maxsize` results based on a Random Replacement (RR)
    algorithm.

    """
    if maxsize is None:
        return _cache(_UnboundCache(), typed)
    elif callable(maxsize):
        return _cache(RRCache(128, choice), typed)(maxsize)
    else:
        return _cache(RRCache(maxsize, choice), typed)
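
# Illustrative use of `rr_cache` (hypothetical `square` function): a randomly
# chosen item is discarded when the cache is full.  Passing a deterministic
# `choice` makes eviction predictable, e.g. `choice=min` (assuming mutually
# comparable keys) always discards the item with the smallest key.
#
#     @rr_cache(maxsize=2, choice=min)
#     def square(x):
#         return x * x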


def ttl_cache(maxsize=128, ttl=600, timer=time.monotonic, typed=False):
    """Decorator to wrap a function with a memoizing callable that saves
    up to `maxsize` results based on a Least Recently Used (LRU)
    algorithm with a per-item time-to-live (TTL) value.
    """
    if maxsize is None:
        return _cache(_UnboundTTLCache(ttl, timer), typed)
    elif callable(maxsize):
        return _cache(TTLCache(128, ttl, timer), typed)(maxsize)
    else:
        return _cache(TTLCache(maxsize, ttl, timer), typed)
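

if __name__ == "__main__":
    # Hypothetical smoke test, not part of the upstream module; run with
    # `python -m cachetools.func`.  It exercises `ttl_cache` and the
    # `functools.lru_cache`-compatible introspection helpers.

    @ttl_cache(maxsize=2, ttl=60)
    def square(x):
        return x * x

    assert square(3) == 9  # first call: a miss, result is computed and cached
    assert square(3) == 9  # second call: a hit, served within the 60s TTL
    assert square.cache_info() == _CacheInfo(1, 1, 2, 1)
    assert square.cache_parameters() == {"maxsize": 2, "typed": False}

    square.cache_clear()  # empties the cache and resets the hit/miss counters
    assert square.cache_info() == _CacheInfo(0, 0, 2, 0)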