aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorchojoyce <chojoyce@google.com>2021-12-05 11:04:44 +0800
committerJOYCE CHO <chojoyce@google.com>2021-12-08 07:53:36 +0000
commit39c4b449e88c875d90ac480bbb799214fd9502d3 (patch)
treef684194a77825219fcd99c9542361533c411d631
parent0377d4b9d25a15e2cc4284730a551a42b7a10d22 (diff)
parent2fd87a94b8d3861d80e9e4236cd480bfdd21c90d (diff)
downloadcachetools-39c4b449e88c875d90ac480bbb799214fd9502d3.tar.gz
Release platform/external/python/cachetools v4.2.4
Bug: 154879379 Test: m acloud && run acloud-dev create Exempt-From-Owner-Approval: release library Change-Id: I5b808f4e6212141cc654abac127b03b72b1a197a
-rw-r--r--.github/workflows/ci.yml22
-rw-r--r--.gitignore11
-rw-r--r--Android.bp29
-rw-r--r--CHANGELOG.rst369
-rw-r--r--LICENSE20
-rw-r--r--MANIFEST.in10
l---------NOTICE1
-rw-r--r--OWNERS3
-rw-r--r--README.rst109
-rw-r--r--docs/.gitignore1
-rw-r--r--docs/Makefile153
-rw-r--r--docs/conf.py24
-rw-r--r--docs/index.rst533
-rw-r--r--pyproject.toml3
-rw-r--r--setup.cfg47
-rw-r--r--setup.py3
-rw-r--r--src/cachetools/__init__.py596
-rw-r--r--src/cachetools/cache.py9
-rw-r--r--src/cachetools/fifo.py9
-rw-r--r--src/cachetools/func.py171
-rw-r--r--src/cachetools/keys.py52
-rw-r--r--src/cachetools/lfu.py9
-rw-r--r--src/cachetools/lru.py9
-rw-r--r--src/cachetools/mru.py9
-rw-r--r--src/cachetools/rr.py9
-rw-r--r--src/cachetools/ttl.py9
-rw-r--r--tests/__init__.py304
-rw-r--r--tests/test_cache.py10
-rw-r--r--tests/test_deprecated.py67
-rw-r--r--tests/test_fifo.py57
-rw-r--r--tests/test_func.py129
-rw-r--r--tests/test_keys.py57
-rw-r--r--tests/test_lfu.py50
-rw-r--r--tests/test_lru.py57
-rw-r--r--tests/test_method.py165
-rw-r--r--tests/test_mru.py50
-rw-r--r--tests/test_rr.py35
-rw-r--r--tests/test_ttl.py196
-rw-r--r--tests/test_wrapper.py153
-rw-r--r--tox.ini39
40 files changed, 3588 insertions, 1 deletions
diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
new file mode 100644
index 0000000..1139f90
--- /dev/null
+++ b/.github/workflows/ci.yml
@@ -0,0 +1,22 @@
+name: CI
+
+on: [push, pull_request, workflow_dispatch]
+
+jobs:
+ main:
+ name: Python ${{ matrix.python }}
+ runs-on: ubuntu-20.04
+ strategy:
+ fail-fast: false
+ matrix:
+ python: ["3.6", "3.7", "3.8", "3.9", "3.10", "pypy-3.6", "pypy-3.7"]
+ steps:
+ - uses: actions/checkout@v2
+ - uses: actions/setup-python@v2
+ with:
+ python-version: ${{ matrix.python }}
+ - run: python -m pip install coverage tox
+ - run: python -m tox
+ - uses: codecov/codecov-action@v1
+ with:
+ name: ${{ matrix.python }}
diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..235e4aa
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,11 @@
+*.egg-info
+*.pyc
+*.swp
+.cache/
+.coverage
+.tox/
+MANIFEST
+build/
+dist/
+docs/_build/
+.pytest_cache/
diff --git a/Android.bp b/Android.bp
new file mode 100644
index 0000000..3f92aff
--- /dev/null
+++ b/Android.bp
@@ -0,0 +1,29 @@
+// Copyright 2021 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+python_library {
+ name: "py-cachetools",
+ host_supported: true,
+ srcs: [
+ "src/cachetools/*.py",
+ ],
+ version: {
+ py2: {
+ enabled: true,
+ },
+ py3: {
+ enabled: true,
+ },
+ },
+} \ No newline at end of file
diff --git a/CHANGELOG.rst b/CHANGELOG.rst
new file mode 100644
index 0000000..0c24a03
--- /dev/null
+++ b/CHANGELOG.rst
@@ -0,0 +1,369 @@
+v4.2.4 (2021-09-30)
+===================
+
+- Add submodule shims for backward compatibility.
+
+
+v4.2.3 (2021-09-29)
+===================
+
+- Add documentation and tests for using ``TTLCache`` with
+ ``datetime``.
+
+- Link to typeshed typing stubs.
+
+- Flatten package file hierarchy.
+
+
+v4.2.2 (2021-04-27)
+===================
+
+- Update build environment.
+
+- Remove Python 2 remnants.
+
+- Format code with Black.
+
+
+v4.2.1 (2021-01-24)
+===================
+
+- Handle ``__missing__()`` not storing cache items.
+
+- Clean up ``__missing__()`` example.
+
+
+v4.2.0 (2020-12-10)
+===================
+
+- Add FIFO cache implementation.
+
+- Add MRU cache implementation.
+
+- Improve behavior of decorators in case of race conditions.
+
+- Improve documentation regarding mutability of caches values and use
+ of key functions with decorators.
+
+- Officially support Python 3.9.
+
+
+v4.1.1 (2020-06-28)
+===================
+
+- Improve ``popitem()`` exception context handling.
+
+- Replace ``float('inf')`` with ``math.inf``.
+
+- Improve "envkey" documentation example.
+
+
+v4.1.0 (2020-04-08)
+===================
+
+- Support ``user_function`` with ``cachetools.func`` decorators
+ (Python 3.8 compatibility).
+
+- Support ``cache_parameters()`` with ``cachetools.func`` decorators
+ (Python 3.9 compatibility).
+
+
+v4.0.0 (2019-12-15)
+===================
+
+- Require Python 3.5 or later.
+
+
+v3.1.1 (2019-05-23)
+===================
+
+- Document how to use shared caches with ``@cachedmethod``.
+
+- Fix pickling/unpickling of cache keys
+
+
+v3.1.0 (2019-01-29)
+===================
+
+- Fix Python 3.8 compatibility issue.
+
+- Use ``time.monotonic`` as default timer if available.
+
+- Improve documentation regarding thread safety.
+
+
+v3.0.0 (2018-11-04)
+===================
+
+- Officially support Python 3.7.
+
+- Drop Python 3.3 support (breaking change).
+
+- Remove ``missing`` cache constructor parameter (breaking change).
+
+- Remove ``self`` from ``@cachedmethod`` key arguments (breaking
+ change).
+
+- Add support for ``maxsize=None`` in ``cachetools.func`` decorators.
+
+
+v2.1.0 (2018-05-12)
+===================
+
+- Deprecate ``missing`` cache constructor parameter.
+
+- Handle overridden ``getsizeof()`` method in subclasses.
+
+- Fix Python 2.7 ``RRCache`` pickling issues.
+
+- Various documentation improvements.
+
+
+v2.0.1 (2017-08-11)
+===================
+
+- Officially support Python 3.6.
+
+- Move documentation to RTD.
+
+- Documentation: Update import paths for key functions (courtesy of
+ slavkoja).
+
+
+v2.0.0 (2016-10-03)
+===================
+
+- Drop Python 3.2 support (breaking change).
+
+- Drop support for deprecated features (breaking change).
+
+- Move key functions to separate package (breaking change).
+
+- Accept non-integer ``maxsize`` in ``Cache.__repr__()``.
+
+
+v1.1.6 (2016-04-01)
+===================
+
+- Reimplement ``LRUCache`` and ``TTLCache`` using
+ ``collections.OrderedDict``. Note that this will break pickle
+ compatibility with previous versions.
+
+- Fix ``TTLCache`` not calling ``__missing__()`` of derived classes.
+
+- Handle ``ValueError`` in ``Cache.__missing__()`` for consistency
+ with caching decorators.
+
+- Improve how ``TTLCache`` handles expired items.
+
+- Use ``Counter.most_common()`` for ``LFUCache.popitem()``.
+
+
+v1.1.5 (2015-10-25)
+===================
+
+- Refactor ``Cache`` base class. Note that this will break pickle
+ compatibility with previous versions.
+
+- Clean up ``LRUCache`` and ``TTLCache`` implementations.
+
+
+v1.1.4 (2015-10-24)
+===================
+
+- Refactor ``LRUCache`` and ``TTLCache`` implementations. Note that
+ this will break pickle compatibility with previous versions.
+
+- Document pending removal of deprecated features.
+
+- Minor documentation improvements.
+
+
+v1.1.3 (2015-09-15)
+===================
+
+- Fix pickle tests.
+
+
+v1.1.2 (2015-09-15)
+===================
+
+- Fix pickling of large ``LRUCache`` and ``TTLCache`` instances.
+
+
+v1.1.1 (2015-09-07)
+===================
+
+- Improve key functions.
+
+- Improve documentation.
+
+- Improve unit test coverage.
+
+
+v1.1.0 (2015-08-28)
+===================
+
+- Add ``@cached`` function decorator.
+
+- Add ``hashkey`` and ``typedkey`` fuctions.
+
+- Add `key` and `lock` arguments to ``@cachedmethod``.
+
+- Set ``__wrapped__`` attributes for Python versions < 3.2.
+
+- Move ``functools`` compatible decorators to ``cachetools.func``.
+
+- Deprecate ``@cachedmethod`` `typed` argument.
+
+- Deprecate `cache` attribute for ``@cachedmethod`` wrappers.
+
+- Deprecate `getsizeof` and `lock` arguments for `cachetools.func`
+ decorator.
+
+
+v1.0.3 (2015-06-26)
+===================
+
+- Clear cache statistics when calling ``clear_cache()``.
+
+
+v1.0.2 (2015-06-18)
+===================
+
+- Allow simple cache instances to be pickled.
+
+- Refactor ``Cache.getsizeof`` and ``Cache.missing`` default
+ implementation.
+
+
+v1.0.1 (2015-06-06)
+===================
+
+- Code cleanup for improved PEP 8 conformance.
+
+- Add documentation and unit tests for using ``@cachedmethod`` with
+ generic mutable mappings.
+
+- Improve documentation.
+
+
+v1.0.0 (2014-12-19)
+===================
+
+- Provide ``RRCache.choice`` property.
+
+- Improve documentation.
+
+
+v0.8.2 (2014-12-15)
+===================
+
+- Use a ``NestedTimer`` for ``TTLCache``.
+
+
+v0.8.1 (2014-12-07)
+===================
+
+- Deprecate ``Cache.getsize()``.
+
+
+v0.8.0 (2014-12-03)
+===================
+
+- Ignore ``ValueError`` raised on cache insertion in decorators.
+
+- Add ``Cache.getsize()``.
+
+- Add ``Cache.__missing__()``.
+
+- Feature freeze for `v1.0`.
+
+
+v0.7.1 (2014-11-22)
+===================
+
+- Fix `MANIFEST.in`.
+
+
+v0.7.0 (2014-11-12)
+===================
+
+- Deprecate ``TTLCache.ExpiredError``.
+
+- Add `choice` argument to ``RRCache`` constructor.
+
+- Refactor ``LFUCache``, ``LRUCache`` and ``TTLCache``.
+
+- Use custom ``NullContext`` implementation for unsynchronized
+ function decorators.
+
+
+v0.6.0 (2014-10-13)
+===================
+
+- Raise ``TTLCache.ExpiredError`` for expired ``TTLCache`` items.
+
+- Support unsynchronized function decorators.
+
+- Allow ``@cachedmethod.cache()`` to return None
+
+
+v0.5.1 (2014-09-25)
+===================
+
+- No formatting of ``KeyError`` arguments.
+
+- Update ``README.rst``.
+
+
+v0.5.0 (2014-09-23)
+===================
+
+- Do not delete expired items in TTLCache.__getitem__().
+
+- Add ``@ttl_cache`` function decorator.
+
+- Fix public ``getsizeof()`` usage.
+
+
+v0.4.0 (2014-06-16)
+===================
+
+- Add ``TTLCache``.
+
+- Add ``Cache`` base class.
+
+- Remove ``@cachedmethod`` `lock` parameter.
+
+
+v0.3.1 (2014-05-07)
+===================
+
+- Add proper locking for ``cache_clear()`` and ``cache_info()``.
+
+- Report `size` in ``cache_info()``.
+
+
+v0.3.0 (2014-05-06)
+===================
+
+- Remove ``@cache`` decorator.
+
+- Add ``size``, ``getsizeof`` members.
+
+- Add ``@cachedmethod`` decorator.
+
+
+v0.2.0 (2014-04-02)
+===================
+
+- Add ``@cache`` decorator.
+
+- Update documentation.
+
+
+v0.1.0 (2014-03-27)
+===================
+
+- Initial release.
diff --git a/LICENSE b/LICENSE
new file mode 100644
index 0000000..fc2146e
--- /dev/null
+++ b/LICENSE
@@ -0,0 +1,20 @@
+The MIT License (MIT)
+
+Copyright (c) 2014-2021 Thomas Kemmer
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal in
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+the Software, and to permit persons to whom the Software is furnished to do so,
+subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/MANIFEST.in b/MANIFEST.in
new file mode 100644
index 0000000..fd9d6a9
--- /dev/null
+++ b/MANIFEST.in
@@ -0,0 +1,10 @@
+include CHANGELOG.rst
+include LICENSE
+include MANIFEST.in
+include README.rst
+include tox.ini
+
+recursive-include docs *
+prune docs/_build
+
+recursive-include tests *.py
diff --git a/NOTICE b/NOTICE
new file mode 120000
index 0000000..7a694c9
--- /dev/null
+++ b/NOTICE
@@ -0,0 +1 @@
+LICENSE \ No newline at end of file
diff --git a/OWNERS b/OWNERS
index 4d9907d..ea7403e 100644
--- a/OWNERS
+++ b/OWNERS
@@ -1 +1,2 @@
-yim@google.com
+chojoyce@google.com
+herbertxue@google.com
diff --git a/README.rst b/README.rst
new file mode 100644
index 0000000..869d3cb
--- /dev/null
+++ b/README.rst
@@ -0,0 +1,109 @@
+cachetools
+========================================================================
+
+.. image:: https://img.shields.io/pypi/v/cachetools
+ :target: https://pypi.org/project/cachetools/
+ :alt: Latest PyPI version
+
+.. image:: https://img.shields.io/readthedocs/cachetools
+ :target: https://cachetools.readthedocs.io/
+ :alt: Documentation build status
+
+.. image:: https://img.shields.io/github/workflow/status/tkem/cachetools/CI
+ :target: https://github.com/tkem/cachetools/actions/workflows/ci.yml
+ :alt: CI build status
+
+.. image:: https://img.shields.io/codecov/c/github/tkem/cachetools/master.svg
+ :target: https://codecov.io/gh/tkem/cachetools
+ :alt: Test coverage
+
+.. image:: https://img.shields.io/github/license/tkem/cachetools
+ :target: https://raw.github.com/tkem/cachetools/master/LICENSE
+ :alt: License
+
+.. image:: https://img.shields.io/badge/code%20style-black-000000.svg
+ :target: https://github.com/psf/black
+ :alt: Code style: black
+
+This module provides various memoizing collections and decorators,
+including variants of the Python Standard Library's `@lru_cache`_
+function decorator.
+
+.. code-block:: python
+
+ from cachetools import cached, LRUCache, TTLCache
+
+ # speed up calculating Fibonacci numbers with dynamic programming
+ @cached(cache={})
+ def fib(n):
+ return n if n < 2 else fib(n - 1) + fib(n - 2)
+
+ # cache least recently used Python Enhancement Proposals
+ @cached(cache=LRUCache(maxsize=32))
+ def get_pep(num):
+ url = 'http://www.python.org/dev/peps/pep-%04d/' % num
+ with urllib.request.urlopen(url) as s:
+ return s.read()
+
+ # cache weather data for no longer than ten minutes
+ @cached(cache=TTLCache(maxsize=1024, ttl=600))
+ def get_weather(place):
+ return owm.weather_at_place(place).get_weather()
+
+For the purpose of this module, a *cache* is a mutable_ mapping_ of a
+fixed maximum size. When the cache is full, i.e. by adding another
+item the cache would exceed its maximum size, the cache must choose
+which item(s) to discard based on a suitable `cache algorithm`_. In
+general, a cache's size is the total size of its items, and an item's
+size is a property or function of its value, e.g. the result of
+``sys.getsizeof(value)``. For the trivial but common case that each
+item counts as ``1``, a cache's size is equal to the number of its
+items, or ``len(cache)``.
+
+Multiple cache classes based on different caching algorithms are
+implemented, and decorators for easily memoizing function and method
+calls are provided, too.
+
+
+Installation
+------------------------------------------------------------------------
+
+cachetools is available from PyPI_ and can be installed by running::
+
+ pip install cachetools
+
+Typing stubs for this package are provided by typeshed_ and can be
+installed by running::
+
+ pip install types-cachetools
+
+
+Project Resources
+------------------------------------------------------------------------
+
+- `Documentation`_
+- `Issue tracker`_
+- `Source code`_
+- `Change log`_
+
+
+License
+------------------------------------------------------------------------
+
+Copyright (c) 2014-2021 Thomas Kemmer.
+
+Licensed under the `MIT License`_.
+
+
+.. _@lru_cache: https://docs.python.org/3/library/functools.html#functools.lru_cache
+.. _mutable: https://docs.python.org/dev/glossary.html#term-mutable
+.. _mapping: https://docs.python.org/dev/glossary.html#term-mapping
+.. _cache algorithm: https://en.wikipedia.org/wiki/Cache_algorithms
+
+.. _PyPI: https://pypi.org/project/cachetools/
+.. _typeshed: https://github.com/python/typeshed/
+.. _Documentation: https://cachetools.readthedocs.io/
+.. _Issue tracker: https://github.com/tkem/cachetools/issues/
+.. _Source code: https://github.com/tkem/cachetools/
+.. _Change log: https://github.com/tkem/cachetools/blob/master/CHANGELOG.rst
+.. _MIT License: https://raw.github.com/tkem/cachetools/master/LICENSE
diff --git a/docs/.gitignore b/docs/.gitignore
new file mode 100644
index 0000000..e35d885
--- /dev/null
+++ b/docs/.gitignore
@@ -0,0 +1 @@
+_build
diff --git a/docs/Makefile b/docs/Makefile
new file mode 100644
index 0000000..c88ce22
--- /dev/null
+++ b/docs/Makefile
@@ -0,0 +1,153 @@
+# Makefile for Sphinx documentation
+#
+
+# You can set these variables from the command line.
+SPHINXOPTS =
+SPHINXBUILD = sphinx-build
+PAPER =
+BUILDDIR = _build
+
+# Internal variables.
+PAPEROPT_a4 = -D latex_paper_size=a4
+PAPEROPT_letter = -D latex_paper_size=letter
+ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .
+# the i18n builder cannot share the environment and doctrees with the others
+I18NSPHINXOPTS = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .
+
+.PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest gettext
+
+help:
+ @echo "Please use \`make <target>' where <target> is one of"
+ @echo " html to make standalone HTML files"
+ @echo " dirhtml to make HTML files named index.html in directories"
+ @echo " singlehtml to make a single large HTML file"
+ @echo " pickle to make pickle files"
+ @echo " json to make JSON files"
+ @echo " htmlhelp to make HTML files and a HTML help project"
+ @echo " qthelp to make HTML files and a qthelp project"
+ @echo " devhelp to make HTML files and a Devhelp project"
+ @echo " epub to make an epub"
+ @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter"
+ @echo " latexpdf to make LaTeX files and run them through pdflatex"
+ @echo " text to make text files"
+ @echo " man to make manual pages"
+ @echo " texinfo to make Texinfo files"
+ @echo " info to make Texinfo files and run them through makeinfo"
+ @echo " gettext to make PO message catalogs"
+ @echo " changes to make an overview of all changed/added/deprecated items"
+ @echo " linkcheck to check all external links for integrity"
+ @echo " doctest to run all doctests embedded in the documentation (if enabled)"
+
+clean:
+ -rm -rf $(BUILDDIR)/*
+
+html:
+ $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html
+ @echo
+ @echo "Build finished. The HTML pages are in $(BUILDDIR)/html."
+
+dirhtml:
+ $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml
+ @echo
+ @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml."
+
+singlehtml:
+ $(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml
+ @echo
+ @echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml."
+
+pickle:
+ $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle
+ @echo
+ @echo "Build finished; now you can process the pickle files."
+
+json:
+ $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json
+ @echo
+ @echo "Build finished; now you can process the JSON files."
+
+htmlhelp:
+ $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp
+ @echo
+ @echo "Build finished; now you can run HTML Help Workshop with the" \
+ ".hhp project file in $(BUILDDIR)/htmlhelp."
+
+qthelp:
+ $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp
+ @echo
+ @echo "Build finished; now you can run "qcollectiongenerator" with the" \
+ ".qhcp project file in $(BUILDDIR)/qthelp, like this:"
+ @echo "# qcollectiongenerator $(BUILDDIR)/qthelp/cachetools.qhcp"
+ @echo "To view the help file:"
+ @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/cachetools.qhc"
+
+devhelp:
+ $(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp
+ @echo
+ @echo "Build finished."
+ @echo "To view the help file:"
+ @echo "# mkdir -p $$HOME/.local/share/devhelp/cachetools"
+ @echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/cachetools"
+ @echo "# devhelp"
+
+epub:
+ $(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub
+ @echo
+ @echo "Build finished. The epub file is in $(BUILDDIR)/epub."
+
+latex:
+ $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
+ @echo
+ @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex."
+ @echo "Run \`make' in that directory to run these through (pdf)latex" \
+ "(use \`make latexpdf' here to do that automatically)."
+
+latexpdf:
+ $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
+ @echo "Running LaTeX files through pdflatex..."
+ $(MAKE) -C $(BUILDDIR)/latex all-pdf
+ @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex."
+
+text:
+ $(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text
+ @echo
+ @echo "Build finished. The text files are in $(BUILDDIR)/text."
+
+man:
+ $(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man
+ @echo
+ @echo "Build finished. The manual pages are in $(BUILDDIR)/man."
+
+texinfo:
+ $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
+ @echo
+ @echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo."
+ @echo "Run \`make' in that directory to run these through makeinfo" \
+ "(use \`make info' here to do that automatically)."
+
+info:
+ $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
+ @echo "Running Texinfo files through makeinfo..."
+ make -C $(BUILDDIR)/texinfo info
+ @echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo."
+
+gettext:
+ $(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale
+ @echo
+ @echo "Build finished. The message catalogs are in $(BUILDDIR)/locale."
+
+changes:
+ $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes
+ @echo
+ @echo "The overview file is in $(BUILDDIR)/changes."
+
+linkcheck:
+ $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck
+ @echo
+ @echo "Link check complete; look for any errors in the above output " \
+ "or in $(BUILDDIR)/linkcheck/output.txt."
+
+doctest:
+ $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest
+ @echo "Testing of doctests in the sources finished, look at the " \
+ "results in $(BUILDDIR)/doctest/output.txt."
diff --git a/docs/conf.py b/docs/conf.py
new file mode 100644
index 0000000..f55e5a5
--- /dev/null
+++ b/docs/conf.py
@@ -0,0 +1,24 @@
+def get_version():
+ import configparser
+ import pathlib
+
+ cp = configparser.ConfigParser()
+ # Python 3.5 ConfigParser does not accept Path as filename
+ cp.read(str(pathlib.Path(__file__).parent.parent / "setup.cfg"))
+ return cp["metadata"]["version"]
+
+
+project = "cachetools"
+copyright = "2014-2021 Thomas Kemmer"
+version = get_version()
+release = version
+
+extensions = [
+ "sphinx.ext.autodoc",
+ "sphinx.ext.coverage",
+ "sphinx.ext.doctest",
+ "sphinx.ext.todo",
+]
+exclude_patterns = ["_build"]
+master_doc = "index"
+html_theme = "default"
diff --git a/docs/index.rst b/docs/index.rst
new file mode 100644
index 0000000..c9828b3
--- /dev/null
+++ b/docs/index.rst
@@ -0,0 +1,533 @@
+*********************************************************************
+:mod:`cachetools` --- Extensible memoizing collections and decorators
+*********************************************************************
+
+.. module:: cachetools
+
+This module provides various memoizing collections and decorators,
+including variants of the Python Standard Library's `@lru_cache`_
+function decorator.
+
+For the purpose of this module, a *cache* is a mutable_ mapping_ of a
+fixed maximum size. When the cache is full, i.e. by adding another
+item the cache would exceed its maximum size, the cache must choose
+which item(s) to discard based on a suitable `cache algorithm`_. In
+general, a cache's size is the total size of its items, and an item's
+size is a property or function of its value, e.g. the result of
+``sys.getsizeof(value)``. For the trivial but common case that each
+item counts as :const:`1`, a cache's size is equal to the number of
+its items, or ``len(cache)``.
+
+Multiple cache classes based on different caching algorithms are
+implemented, and decorators for easily memoizing function and method
+calls are provided, too.
+
+
+.. testsetup:: *
+
+ import operator
+ from cachetools import cached, cachedmethod, LRUCache, TTLCache
+
+ from unittest import mock
+ urllib = mock.MagicMock()
+
+
+Cache implementations
+=====================
+
+This module provides several classes implementing caches using
+different cache algorithms. All these classes derive from class
+:class:`Cache`, which in turn derives from
+:class:`collections.abc.MutableMapping`, and provide :attr:`maxsize` and
+:attr:`currsize` properties to retrieve the maximum and current size
+of the cache. When a cache is full, :meth:`Cache.__setitem__()` calls
+:meth:`self.popitem()` repeatedly until there is enough room for the
+item to be added.
+
+:class:`Cache` also features a :meth:`getsizeof` method, which returns
+the size of a given `value`. The default implementation of
+:meth:`getsizeof` returns :const:`1` irrespective of its argument,
+making the cache's size equal to the number of its items, or
+``len(cache)``. For convenience, all cache classes accept an optional
+named constructor parameter `getsizeof`, which may specify a function
+of one argument used to retrieve the size of an item's value.
+
+Note that the values of a :class:`Cache` are mutable by default, as
+are e.g. the values of a :class:`dict`. It is the user's
+responsibility to take care that cached values are not accidentally
+modified. This is especially important when using a custom
+`getsizeof` function, since the size of an item's value will only be
+computed when the item is inserted into the cache.
+
+.. note::
+
+ Please be aware that all these classes are *not* thread-safe.
+ Access to a shared cache from multiple threads must be properly
+ synchronized, e.g. by using one of the memoizing decorators with a
+ suitable `lock` object.
+
+.. autoclass:: Cache(maxsize, getsizeof=None)
+ :members: currsize, getsizeof, maxsize
+
+ This class discards arbitrary items using :meth:`popitem` to make
+ space when necessary. Derived classes may override :meth:`popitem`
+ to implement specific caching strategies. If a subclass has to
+ keep track of item access, insertion or deletion, it may
+ additionally need to override :meth:`__getitem__`,
+ :meth:`__setitem__` and :meth:`__delitem__`.
+
+.. autoclass:: FIFOCache(maxsize, getsizeof=None)
+ :members: popitem
+
+ This class evicts items in the order they were added to make space
+ when necessary.
+
+.. autoclass:: LFUCache(maxsize, getsizeof=None)
+ :members: popitem
+
+ This class counts how often an item is retrieved, and discards the
+ items used least often to make space when necessary.
+
+.. autoclass:: LRUCache(maxsize, getsizeof=None)
+ :members: popitem
+
+ This class discards the least recently used items first to make
+ space when necessary.
+
+.. autoclass:: MRUCache(maxsize, getsizeof=None)
+ :members: popitem
+
+ This class discards the most recently used items first to make
+ space when necessary.
+
+.. autoclass:: RRCache(maxsize, choice=random.choice, getsizeof=None)
+ :members: choice, popitem
+
+ This class randomly selects candidate items and discards them to
+ make space when necessary.
+
+ By default, items are selected from the list of cache keys using
+ :func:`random.choice`. The optional argument `choice` may specify
+ an alternative function that returns an arbitrary element from a
+ non-empty sequence.
+
+.. autoclass:: TTLCache(maxsize, ttl, timer=time.monotonic, getsizeof=None)
+ :members: popitem, timer, ttl
+
+ This class associates a time-to-live value with each item. Items
+   that expire because they have exceeded their time-to-live will no
+   longer be accessible, and will be removed eventually.
+ expired items are there to remove, the least recently used items
+ will be discarded first to make space when necessary.
+
+ By default, the time-to-live is specified in seconds and
+ :func:`time.monotonic` is used to retrieve the current time. A
+ custom `timer` function can also be supplied:
+
+ .. testcode::
+
+ from datetime import datetime, timedelta
+
+ cache = TTLCache(maxsize=10, ttl=timedelta(hours=12), timer=datetime.now)
+
+ The expression `timer() + ttl` at the time of insertion defines the
+ expiration time of a cache item, and must be comparable against
+ later results of `timer()`.
+
+ .. method:: expire(self, time=None)
+
+ Expired items will be removed from a cache only at the next
+ mutating operation, e.g. :meth:`__setitem__` or
+ :meth:`__delitem__`, and therefore may still claim memory.
+ Calling this method removes all items whose time-to-live would
+ have expired by `time`, so garbage collection is free to reuse
+ their memory. If `time` is :const:`None`, this removes all
+ items that have expired by the current value returned by
+ :attr:`timer`.
+
+
+Extending cache classes
+-----------------------
+
+Sometimes it may be desirable to notice when and what cache items are
+evicted, i.e. removed from a cache to make room for new items. Since
+all cache implementations call :meth:`popitem` to evict items from the
+cache, this can be achieved by overriding this method in a subclass:
+
+.. doctest::
+ :pyversion: >= 3
+
+ >>> class MyCache(LRUCache):
+ ... def popitem(self):
+ ... key, value = super().popitem()
+ ... print('Key "%s" evicted with value "%s"' % (key, value))
+ ... return key, value
+
+ >>> c = MyCache(maxsize=2)
+ >>> c['a'] = 1
+ >>> c['b'] = 2
+ >>> c['c'] = 3
+ Key "a" evicted with value "1"
+
+Similar to the standard library's :class:`collections.defaultdict`,
+subclasses of :class:`Cache` may implement a :meth:`__missing__`
+method which is called by :meth:`Cache.__getitem__` if the requested
+key is not found:
+
+.. doctest::
+ :pyversion: >= 3
+
+ >>> class PepStore(LRUCache):
+ ... def __missing__(self, key):
+ ... """Retrieve text of a Python Enhancement Proposal"""
+ ... url = 'http://www.python.org/dev/peps/pep-%04d/' % key
+ ... with urllib.request.urlopen(url) as s:
+ ... pep = s.read()
+ ... self[key] = pep # store text in cache
+ ... return pep
+
+ >>> peps = PepStore(maxsize=4)
+ >>> for n in 8, 9, 290, 308, 320, 8, 218, 320, 279, 289, 320:
+ ... pep = peps[n]
+ >>> print(sorted(peps.keys()))
+ [218, 279, 289, 320]
+
+Note, though, that such a class does not really behave like a *cache*
+any more, and will lead to surprising results when used with any of
+the memoizing decorators described below. However, it may be useful
+in its own right.
+
+
+Memoizing decorators
+====================
+
+The :mod:`cachetools` module provides decorators for memoizing
+function and method calls. This can save time when a function is
+often called with the same arguments:
+
+.. doctest::
+
+ >>> @cached(cache={})
+ ... def fib(n):
+ ... 'Compute the nth number in the Fibonacci sequence'
+ ... return n if n < 2 else fib(n - 1) + fib(n - 2)
+
+ >>> fib(42)
+ 267914296
+
+.. decorator:: cached(cache, key=cachetools.keys.hashkey, lock=None)
+
+ Decorator to wrap a function with a memoizing callable that saves
+ results in a cache.
+
+ The `cache` argument specifies a cache object to store previous
+ function arguments and return values. Note that `cache` need not
+ be an instance of the cache implementations provided by the
+ :mod:`cachetools` module. :func:`cached` will work with any
+ mutable mapping type, including plain :class:`dict` and
+ :class:`weakref.WeakValueDictionary`.
+
+ `key` specifies a function that will be called with the same
+ positional and keyword arguments as the wrapped function itself,
+ and which has to return a suitable cache key. Since caches are
+ mappings, the object returned by `key` must be hashable. The
+ default is to call :func:`cachetools.keys.hashkey`.
+
+ If `lock` is not :const:`None`, it must specify an object
+ implementing the `context manager`_ protocol. Any access to the
+ cache will then be nested in a ``with lock:`` statement. This can
+ be used for synchronizing thread access to the cache by providing a
+ :class:`threading.Lock` instance, for example.
+
+ .. note::
+
+ The `lock` context manager is used only to guard access to the
+ cache object. The underlying wrapped function will be called
+ outside the `with` statement, and must be thread-safe by itself.
+
+ The original underlying function is accessible through the
+ :attr:`__wrapped__` attribute of the memoizing wrapper function.
+ This can be used for introspection or for bypassing the cache.
+
+ To perform operations on the cache object, for example to clear the
+ cache during runtime, the cache should be assigned to a variable.
+ When a `lock` object is used, any access to the cache from outside
+ the function wrapper should also be performed within an appropriate
+ `with` statement:
+
+ .. testcode::
+
+ from cachetools.keys import hashkey
+ from threading import Lock
+
+ cache = LRUCache(maxsize=32)
+ lock = Lock()
+
+ @cached(cache, key=hashkey, lock=lock)
+ def get_pep(num):
+ 'Retrieve text of a Python Enhancement Proposal'
+ url = 'http://www.python.org/dev/peps/pep-%04d/' % num
+ with urllib.request.urlopen(url) as s:
+ return s.read()
+
+ # make sure access to cache is synchronized
+ with lock:
+ cache.clear()
+
+ # always use the key function for accessing cache items
+ with lock:
+ cache.pop(hashkey(42), None)
+
+ It is also possible to use a single shared cache object with
+ multiple functions. However, care must be taken that different
+ cache keys are generated for each function, even for identical
+ function arguments:
+
+ .. doctest::
+ :options: +ELLIPSIS
+
+ >>> from cachetools.keys import hashkey
+ >>> from functools import partial
+
+ >>> # shared cache for integer sequences
+ >>> numcache = {}
+
+ >>> # compute Fibonacci numbers
+ >>> @cached(numcache, key=partial(hashkey, 'fib'))
+ ... def fib(n):
+ ... return n if n < 2 else fib(n - 1) + fib(n - 2)
+
+ >>> # compute Lucas numbers
+ >>> @cached(numcache, key=partial(hashkey, 'luc'))
+ ... def luc(n):
+ ... return 2 - n if n < 2 else luc(n - 1) + luc(n - 2)
+
+ >>> fib(42)
+ 267914296
+ >>> luc(42)
+ 599074578
+ >>> list(sorted(numcache.items()))
+ [..., (('fib', 42), 267914296), ..., (('luc', 42), 599074578)]
+
+
+.. decorator:: cachedmethod(cache, key=cachetools.keys.hashkey, lock=None)
+
+ Decorator to wrap a class or instance method with a memoizing
+ callable that saves results in a (possibly shared) cache.
+
+ The main difference between this and the :func:`cached` function
+ decorator is that `cache` and `lock` are not passed objects, but
+ functions. Both will be called with :const:`self` (or :const:`cls`
+ for class methods) as their sole argument to retrieve the cache or
+ lock object for the method's respective instance or class.
+
+ .. note::
+
+ As with :func:`cached`, the context manager obtained by calling
+ ``lock(self)`` will only guard access to the cache itself. It
+ is the user's responsibility to handle concurrent calls to the
+ underlying wrapped method in a multithreaded environment.
+
+ One advantage of :func:`cachedmethod` over the :func:`cached`
+ function decorator is that cache properties such as `maxsize` can
+ be set at runtime:
+
+ .. testcode::
+
+ class CachedPEPs(object):
+
+ def __init__(self, cachesize):
+ self.cache = LRUCache(maxsize=cachesize)
+
+ @cachedmethod(operator.attrgetter('cache'))
+ def get(self, num):
+ """Retrieve text of a Python Enhancement Proposal"""
+ url = 'http://www.python.org/dev/peps/pep-%04d/' % num
+ with urllib.request.urlopen(url) as s:
+ return s.read()
+
+ peps = CachedPEPs(cachesize=10)
+ print("PEP #1: %s" % peps.get(1))
+
+ .. testoutput::
+ :hide:
+ :options: +ELLIPSIS
+
+ PEP #1: ...
+
+
+ When using a shared cache for multiple methods, be aware that
+ different cache keys must be created for each method even when
+ function arguments are the same, just as with the `@cached`
+ decorator:
+
+ .. testcode::
+
+ class CachedReferences(object):
+
+ def __init__(self, cachesize):
+ self.cache = LRUCache(maxsize=cachesize)
+
+ @cachedmethod(lambda self: self.cache, key=partial(hashkey, 'pep'))
+ def get_pep(self, num):
+ """Retrieve text of a Python Enhancement Proposal"""
+ url = 'http://www.python.org/dev/peps/pep-%04d/' % num
+ with urllib.request.urlopen(url) as s:
+ return s.read()
+
+ @cachedmethod(lambda self: self.cache, key=partial(hashkey, 'rfc'))
+ def get_rfc(self, num):
+ """Retrieve text of an IETF Request for Comments"""
+ url = 'https://tools.ietf.org/rfc/rfc%d.txt' % num
+ with urllib.request.urlopen(url) as s:
+ return s.read()
+
+ docs = CachedReferences(cachesize=100)
+ print("PEP #1: %s" % docs.get_pep(1))
+ print("RFC #1: %s" % docs.get_rfc(1))
+
+ .. testoutput::
+ :hide:
+ :options: +ELLIPSIS
+
+ PEP #1: ...
+ RFC #1: ...
+
+
+*****************************************************************
+:mod:`cachetools.keys` --- Key functions for memoizing decorators
+*****************************************************************
+
+.. module:: cachetools.keys
+
+This module provides several functions that can be used as key
+functions with the :func:`cached` and :func:`cachedmethod` decorators:
+
+.. autofunction:: hashkey
+
+ This function returns a :class:`tuple` instance suitable as a cache
+   key, provided the positional and keyword arguments are hashable.
+
+.. autofunction:: typedkey
+
+ This function is similar to :func:`hashkey`, but arguments of
+ different types will yield distinct cache keys. For example,
+ ``typedkey(3)`` and ``typedkey(3.0)`` will return different
+ results.
+
+These functions can also be helpful when implementing custom key
+functions for handling some non-hashable arguments. For example,
+calling the following function with a dictionary as its `env` argument
+will raise a :class:`TypeError`, since :class:`dict` is not hashable::
+
+ @cached(LRUCache(maxsize=128))
+ def foo(x, y, z, env={}):
+ pass
+
+However, if `env` always holds only hashable values itself, a custom
+key function can be written that handles the `env` keyword argument
+specially::
+
+ def envkey(*args, env={}, **kwargs):
+ key = hashkey(*args, **kwargs)
+ key += tuple(sorted(env.items()))
+ return key
+
+The :func:`envkey` function can then be used in decorator declarations
+like this::
+
+ @cached(LRUCache(maxsize=128), key=envkey)
+ def foo(x, y, z, env={}):
+ pass
+
+ foo(1, 2, 3, env=dict(a='a', b='b'))
+
+
+****************************************************************************
+:mod:`cachetools.func` --- :func:`functools.lru_cache` compatible decorators
+****************************************************************************
+
+.. module:: cachetools.func
+
+To ease migration from (or to) Python 3's :func:`functools.lru_cache`,
+this module provides several memoizing function decorators with a
+similar API. All these decorators wrap a function with a memoizing
+callable that saves up to the `maxsize` most recent calls, using
+different caching strategies. If `maxsize` is set to :const:`None`,
+the caching strategy is effectively disabled and the cache can grow
+without bound.
+
+If the optional argument `typed` is set to :const:`True`, function
+arguments of different types will be cached separately. For example,
+``f(3)`` and ``f(3.0)`` will be treated as distinct calls with
+distinct results.
+
+If a `user_function` is specified instead, it must be a callable.
+This allows the decorator to be applied directly to a user function,
+leaving the `maxsize` at its default value of 128::
+
+ @cachetools.func.lru_cache
+ def count_vowels(sentence):
+ sentence = sentence.casefold()
+ return sum(sentence.count(vowel) for vowel in 'aeiou')
+
+The wrapped function is instrumented with a :func:`cache_parameters`
+function that returns a new :class:`dict` showing the values for
+`maxsize` and `typed`. This is for information purposes only.
+Mutating the values has no effect.
+
+The wrapped function is also instrumented with :func:`cache_info` and
+:func:`cache_clear` functions to provide information about cache
+performance and clear the cache. Please see the
+:func:`functools.lru_cache` documentation for details. Also note that
+all the decorators in this module are thread-safe by default.
+
+
+.. decorator:: fifo_cache(user_function)
+ fifo_cache(maxsize=128, typed=False)
+
+ Decorator that wraps a function with a memoizing callable that
+ saves up to `maxsize` results based on a First In First Out
+ (FIFO) algorithm.
+
+.. decorator:: lfu_cache(user_function)
+ lfu_cache(maxsize=128, typed=False)
+
+ Decorator that wraps a function with a memoizing callable that
+ saves up to `maxsize` results based on a Least Frequently Used
+ (LFU) algorithm.
+
+.. decorator:: lru_cache(user_function)
+ lru_cache(maxsize=128, typed=False)
+
+ Decorator that wraps a function with a memoizing callable that
+ saves up to `maxsize` results based on a Least Recently Used (LRU)
+ algorithm.
+
+.. decorator:: mru_cache(user_function)
+ mru_cache(maxsize=128, typed=False)
+
+ Decorator that wraps a function with a memoizing callable that
+ saves up to `maxsize` results based on a Most Recently Used (MRU)
+ algorithm.
+
+.. decorator:: rr_cache(user_function)
+ rr_cache(maxsize=128, choice=random.choice, typed=False)
+
+ Decorator that wraps a function with a memoizing callable that
+ saves up to `maxsize` results based on a Random Replacement (RR)
+ algorithm.
+
+.. decorator:: ttl_cache(user_function)
+ ttl_cache(maxsize=128, ttl=600, timer=time.monotonic, typed=False)
+
+ Decorator to wrap a function with a memoizing callable that saves
+ up to `maxsize` results based on a Least Recently Used (LRU)
+ algorithm with a per-item time-to-live (TTL) value.
+
+
+.. _@lru_cache: http://docs.python.org/3/library/functools.html#functools.lru_cache
+.. _cache algorithm: http://en.wikipedia.org/wiki/Cache_algorithms
+.. _context manager: http://docs.python.org/dev/glossary.html#term-context-manager
+.. _mapping: http://docs.python.org/dev/glossary.html#term-mapping
+.. _mutable: http://docs.python.org/dev/glossary.html#term-mutable
diff --git a/pyproject.toml b/pyproject.toml
new file mode 100644
index 0000000..f87c03a
--- /dev/null
+++ b/pyproject.toml
@@ -0,0 +1,3 @@
+[build-system]
+requires = ["setuptools >= 46.4.0", "wheel"]
+build-backend = "setuptools.build_meta"
diff --git a/setup.cfg b/setup.cfg
new file mode 100644
index 0000000..74bcbc5
--- /dev/null
+++ b/setup.cfg
@@ -0,0 +1,47 @@
+[metadata]
+name = cachetools
+version = attr: cachetools.__version__
+url = https://github.com/tkem/cachetools/
+author = Thomas Kemmer
+author_email = tkemmer@computer.org
+license = MIT
+license_file = LICENSE
+description = Extensible memoizing collections and decorators
+long_description = file: README.rst
+classifiers =
+ Development Status :: 5 - Production/Stable
+ Environment :: Other Environment
+ Intended Audience :: Developers
+ License :: OSI Approved :: MIT License
+ Operating System :: OS Independent
+ Programming Language :: Python
+ Programming Language :: Python :: 3
+ Programming Language :: Python :: 3.5
+ Programming Language :: Python :: 3.6
+ Programming Language :: Python :: 3.7
+ Programming Language :: Python :: 3.8
+ Programming Language :: Python :: 3.9
+ Programming Language :: Python :: 3.10
+ Topic :: Software Development :: Libraries :: Python Modules
+
+[options]
+package_dir =
+ = src
+packages = find:
+python_requires = ~= 3.5
+
+[options.packages.find]
+where = src
+
+[flake8]
+max-line-length = 80
+exclude = .git, .tox, build
+select = C, E, F, W, B, B950, I, N
+# F401: imported but unused (submodule shims)
+# E501: line too long (black)
+ignore = F401, E501
+
+[build_sphinx]
+source-dir = docs/
+build-dir = docs/_build
+all_files = 1
diff --git a/setup.py b/setup.py
new file mode 100644
index 0000000..6068493
--- /dev/null
+++ b/setup.py
@@ -0,0 +1,3 @@
+from setuptools import setup
+
+setup()
diff --git a/src/cachetools/__init__.py b/src/cachetools/__init__.py
new file mode 100644
index 0000000..42822f0
--- /dev/null
+++ b/src/cachetools/__init__.py
@@ -0,0 +1,596 @@
+"""Extensible memoizing collections and decorators."""
+
+__all__ = (
+ "Cache",
+ "FIFOCache",
+ "LFUCache",
+ "LRUCache",
+ "MRUCache",
+ "RRCache",
+ "TTLCache",
+ "cached",
+ "cachedmethod",
+)
+
+__version__ = "4.2.4"
+
+import collections
+import collections.abc
+import functools
+import random
+import time
+
+from .keys import hashkey
+
+
+class _DefaultSize:
+
+ __slots__ = ()
+
+ def __getitem__(self, _):
+ return 1
+
+ def __setitem__(self, _, value):
+ assert value == 1
+
+ def pop(self, _):
+ return 1
+
+
+class Cache(collections.abc.MutableMapping):
+ """Mutable mapping to serve as a simple cache or cache base class."""
+
+ __marker = object()
+
+ __size = _DefaultSize()
+
+ def __init__(self, maxsize, getsizeof=None):
+ if getsizeof:
+ self.getsizeof = getsizeof
+ if self.getsizeof is not Cache.getsizeof:
+ self.__size = dict()
+ self.__data = dict()
+ self.__currsize = 0
+ self.__maxsize = maxsize
+
+ def __repr__(self):
+ return "%s(%r, maxsize=%r, currsize=%r)" % (
+ self.__class__.__name__,
+ list(self.__data.items()),
+ self.__maxsize,
+ self.__currsize,
+ )
+
+ def __getitem__(self, key):
+ try:
+ return self.__data[key]
+ except KeyError:
+ return self.__missing__(key)
+
+ def __setitem__(self, key, value):
+ maxsize = self.__maxsize
+ size = self.getsizeof(value)
+ if size > maxsize:
+ raise ValueError("value too large")
+ if key not in self.__data or self.__size[key] < size:
+ while self.__currsize + size > maxsize:
+ self.popitem()
+ if key in self.__data:
+ diffsize = size - self.__size[key]
+ else:
+ diffsize = size
+ self.__data[key] = value
+ self.__size[key] = size
+ self.__currsize += diffsize
+
+ def __delitem__(self, key):
+ size = self.__size.pop(key)
+ del self.__data[key]
+ self.__currsize -= size
+
+ def __contains__(self, key):
+ return key in self.__data
+
+ def __missing__(self, key):
+ raise KeyError(key)
+
+ def __iter__(self):
+ return iter(self.__data)
+
+ def __len__(self):
+ return len(self.__data)
+
+ def get(self, key, default=None):
+ if key in self:
+ return self[key]
+ else:
+ return default
+
+ def pop(self, key, default=__marker):
+ if key in self:
+ value = self[key]
+ del self[key]
+ elif default is self.__marker:
+ raise KeyError(key)
+ else:
+ value = default
+ return value
+
+ def setdefault(self, key, default=None):
+ if key in self:
+ value = self[key]
+ else:
+ self[key] = value = default
+ return value
+
+ @property
+ def maxsize(self):
+ """The maximum size of the cache."""
+ return self.__maxsize
+
+ @property
+ def currsize(self):
+ """The current size of the cache."""
+ return self.__currsize
+
+ @staticmethod
+ def getsizeof(value):
+ """Return the size of a cache element's value."""
+ return 1
+
+
+class FIFOCache(Cache):
+ """First In First Out (FIFO) cache implementation."""
+
+ def __init__(self, maxsize, getsizeof=None):
+ Cache.__init__(self, maxsize, getsizeof)
+ self.__order = collections.OrderedDict()
+
+ def __setitem__(self, key, value, cache_setitem=Cache.__setitem__):
+ cache_setitem(self, key, value)
+ try:
+ self.__order.move_to_end(key)
+ except KeyError:
+ self.__order[key] = None
+
+ def __delitem__(self, key, cache_delitem=Cache.__delitem__):
+ cache_delitem(self, key)
+ del self.__order[key]
+
+ def popitem(self):
+ """Remove and return the `(key, value)` pair first inserted."""
+ try:
+ key = next(iter(self.__order))
+ except StopIteration:
+ raise KeyError("%s is empty" % type(self).__name__) from None
+ else:
+ return (key, self.pop(key))
+
+
+class LFUCache(Cache):
+ """Least Frequently Used (LFU) cache implementation."""
+
+ def __init__(self, maxsize, getsizeof=None):
+ Cache.__init__(self, maxsize, getsizeof)
+ self.__counter = collections.Counter()
+
+ def __getitem__(self, key, cache_getitem=Cache.__getitem__):
+ value = cache_getitem(self, key)
+ if key in self: # __missing__ may not store item
+ self.__counter[key] -= 1
+ return value
+
+ def __setitem__(self, key, value, cache_setitem=Cache.__setitem__):
+ cache_setitem(self, key, value)
+ self.__counter[key] -= 1
+
+ def __delitem__(self, key, cache_delitem=Cache.__delitem__):
+ cache_delitem(self, key)
+ del self.__counter[key]
+
+ def popitem(self):
+ """Remove and return the `(key, value)` pair least frequently used."""
+ try:
+ ((key, _),) = self.__counter.most_common(1)
+ except ValueError:
+ raise KeyError("%s is empty" % type(self).__name__) from None
+ else:
+ return (key, self.pop(key))
+
+
+class LRUCache(Cache):
+ """Least Recently Used (LRU) cache implementation."""
+
+ def __init__(self, maxsize, getsizeof=None):
+ Cache.__init__(self, maxsize, getsizeof)
+ self.__order = collections.OrderedDict()
+
+ def __getitem__(self, key, cache_getitem=Cache.__getitem__):
+ value = cache_getitem(self, key)
+ if key in self: # __missing__ may not store item
+ self.__update(key)
+ return value
+
+ def __setitem__(self, key, value, cache_setitem=Cache.__setitem__):
+ cache_setitem(self, key, value)
+ self.__update(key)
+
+ def __delitem__(self, key, cache_delitem=Cache.__delitem__):
+ cache_delitem(self, key)
+ del self.__order[key]
+
+ def popitem(self):
+ """Remove and return the `(key, value)` pair least recently used."""
+ try:
+ key = next(iter(self.__order))
+ except StopIteration:
+ raise KeyError("%s is empty" % type(self).__name__) from None
+ else:
+ return (key, self.pop(key))
+
+ def __update(self, key):
+ try:
+ self.__order.move_to_end(key)
+ except KeyError:
+ self.__order[key] = None
+
+
+class MRUCache(Cache):
+ """Most Recently Used (MRU) cache implementation."""
+
+ def __init__(self, maxsize, getsizeof=None):
+ Cache.__init__(self, maxsize, getsizeof)
+ self.__order = collections.OrderedDict()
+
+ def __getitem__(self, key, cache_getitem=Cache.__getitem__):
+ value = cache_getitem(self, key)
+ if key in self: # __missing__ may not store item
+ self.__update(key)
+ return value
+
+ def __setitem__(self, key, value, cache_setitem=Cache.__setitem__):
+ cache_setitem(self, key, value)
+ self.__update(key)
+
+ def __delitem__(self, key, cache_delitem=Cache.__delitem__):
+ cache_delitem(self, key)
+ del self.__order[key]
+
+ def popitem(self):
+ """Remove and return the `(key, value)` pair most recently used."""
+ try:
+ key = next(iter(self.__order))
+ except StopIteration:
+ raise KeyError("%s is empty" % type(self).__name__) from None
+ else:
+ return (key, self.pop(key))
+
+ def __update(self, key):
+ try:
+ self.__order.move_to_end(key, last=False)
+ except KeyError:
+ self.__order[key] = None
+
+
+class RRCache(Cache):
+ """Random Replacement (RR) cache implementation."""
+
+ def __init__(self, maxsize, choice=random.choice, getsizeof=None):
+ Cache.__init__(self, maxsize, getsizeof)
+ self.__choice = choice
+
+ @property
+ def choice(self):
+ """The `choice` function used by the cache."""
+ return self.__choice
+
+ def popitem(self):
+ """Remove and return a random `(key, value)` pair."""
+ try:
+ key = self.__choice(list(self))
+ except IndexError:
+ raise KeyError("%s is empty" % type(self).__name__) from None
+ else:
+ return (key, self.pop(key))
+
+
+class _Timer:
+ def __init__(self, timer):
+ self.__timer = timer
+ self.__nesting = 0
+
+ def __call__(self):
+ if self.__nesting == 0:
+ return self.__timer()
+ else:
+ return self.__time
+
+ def __enter__(self):
+ if self.__nesting == 0:
+ self.__time = time = self.__timer()
+ else:
+ time = self.__time
+ self.__nesting += 1
+ return time
+
+ def __exit__(self, *exc):
+ self.__nesting -= 1
+
+ def __reduce__(self):
+ return _Timer, (self.__timer,)
+
+ def __getattr__(self, name):
+ return getattr(self.__timer, name)
+
+
+class _Link:
+
+ __slots__ = ("key", "expire", "next", "prev")
+
+ def __init__(self, key=None, expire=None):
+ self.key = key
+ self.expire = expire
+
+ def __reduce__(self):
+ return _Link, (self.key, self.expire)
+
+ def unlink(self):
+ next = self.next
+ prev = self.prev
+ prev.next = next
+ next.prev = prev
+
+
+class TTLCache(Cache):
+ """LRU Cache implementation with per-item time-to-live (TTL) value."""
+
+ def __init__(self, maxsize, ttl, timer=time.monotonic, getsizeof=None):
+ Cache.__init__(self, maxsize, getsizeof)
+ self.__root = root = _Link()
+ root.prev = root.next = root
+ self.__links = collections.OrderedDict()
+ self.__timer = _Timer(timer)
+ self.__ttl = ttl
+
+ def __contains__(self, key):
+ try:
+ link = self.__links[key] # no reordering
+ except KeyError:
+ return False
+ else:
+ return not (link.expire < self.__timer())
+
+ def __getitem__(self, key, cache_getitem=Cache.__getitem__):
+ try:
+ link = self.__getlink(key)
+ except KeyError:
+ expired = False
+ else:
+ expired = link.expire < self.__timer()
+ if expired:
+ return self.__missing__(key)
+ else:
+ return cache_getitem(self, key)
+
+ def __setitem__(self, key, value, cache_setitem=Cache.__setitem__):
+ with self.__timer as time:
+ self.expire(time)
+ cache_setitem(self, key, value)
+ try:
+ link = self.__getlink(key)
+ except KeyError:
+ self.__links[key] = link = _Link(key)
+ else:
+ link.unlink()
+ link.expire = time + self.__ttl
+ link.next = root = self.__root
+ link.prev = prev = root.prev
+ prev.next = root.prev = link
+
+ def __delitem__(self, key, cache_delitem=Cache.__delitem__):
+ cache_delitem(self, key)
+ link = self.__links.pop(key)
+ link.unlink()
+ if link.expire < self.__timer():
+ raise KeyError(key)
+
+ def __iter__(self):
+ root = self.__root
+ curr = root.next
+ while curr is not root:
+ # "freeze" time for iterator access
+ with self.__timer as time:
+ if not (curr.expire < time):
+ yield curr.key
+ curr = curr.next
+
+ def __len__(self):
+ root = self.__root
+ curr = root.next
+ time = self.__timer()
+ count = len(self.__links)
+ while curr is not root and curr.expire < time:
+ count -= 1
+ curr = curr.next
+ return count
+
+ def __setstate__(self, state):
+ self.__dict__.update(state)
+ root = self.__root
+ root.prev = root.next = root
+ for link in sorted(self.__links.values(), key=lambda obj: obj.expire):
+ link.next = root
+ link.prev = prev = root.prev
+ prev.next = root.prev = link
+ self.expire(self.__timer())
+
+ def __repr__(self, cache_repr=Cache.__repr__):
+ with self.__timer as time:
+ self.expire(time)
+ return cache_repr(self)
+
+ @property
+ def currsize(self):
+ with self.__timer as time:
+ self.expire(time)
+ return super().currsize
+
+ @property
+ def timer(self):
+ """The timer function used by the cache."""
+ return self.__timer
+
+ @property
+ def ttl(self):
+ """The time-to-live value of the cache's items."""
+ return self.__ttl
+
+ def expire(self, time=None):
+ """Remove expired items from the cache."""
+ if time is None:
+ time = self.__timer()
+ root = self.__root
+ curr = root.next
+ links = self.__links
+ cache_delitem = Cache.__delitem__
+ while curr is not root and curr.expire < time:
+ cache_delitem(self, curr.key)
+ del links[curr.key]
+ next = curr.next
+ curr.unlink()
+ curr = next
+
+ def clear(self):
+ with self.__timer as time:
+ self.expire(time)
+ Cache.clear(self)
+
+ def get(self, *args, **kwargs):
+ with self.__timer:
+ return Cache.get(self, *args, **kwargs)
+
+ def pop(self, *args, **kwargs):
+ with self.__timer:
+ return Cache.pop(self, *args, **kwargs)
+
+ def setdefault(self, *args, **kwargs):
+ with self.__timer:
+ return Cache.setdefault(self, *args, **kwargs)
+
+ def popitem(self):
+ """Remove and return the `(key, value)` pair least recently used that
+ has not already expired.
+
+ """
+ with self.__timer as time:
+ self.expire(time)
+ try:
+ key = next(iter(self.__links))
+ except StopIteration:
+ raise KeyError("%s is empty" % type(self).__name__) from None
+ else:
+ return (key, self.pop(key))
+
+ def __getlink(self, key):
+ value = self.__links[key]
+ self.__links.move_to_end(key)
+ return value
+
+
+def cached(cache, key=hashkey, lock=None):
+ """Decorator to wrap a function with a memoizing callable that saves
+ results in a cache.
+
+ """
+
+ def decorator(func):
+ if cache is None:
+
+ def wrapper(*args, **kwargs):
+ return func(*args, **kwargs)
+
+ elif lock is None:
+
+ def wrapper(*args, **kwargs):
+ k = key(*args, **kwargs)
+ try:
+ return cache[k]
+ except KeyError:
+ pass # key not found
+ v = func(*args, **kwargs)
+ try:
+ cache[k] = v
+ except ValueError:
+ pass # value too large
+ return v
+
+ else:
+
+ def wrapper(*args, **kwargs):
+ k = key(*args, **kwargs)
+ try:
+ with lock:
+ return cache[k]
+ except KeyError:
+ pass # key not found
+ v = func(*args, **kwargs)
+ # in case of a race, prefer the item already in the cache
+ try:
+ with lock:
+ return cache.setdefault(k, v)
+ except ValueError:
+ return v # value too large
+
+ return functools.update_wrapper(wrapper, func)
+
+ return decorator
+
+
+def cachedmethod(cache, key=hashkey, lock=None):
+ """Decorator to wrap a class or instance method with a memoizing
+ callable that saves results in a cache.
+
+ """
+
+ def decorator(method):
+ if lock is None:
+
+ def wrapper(self, *args, **kwargs):
+ c = cache(self)
+ if c is None:
+ return method(self, *args, **kwargs)
+ k = key(*args, **kwargs)
+ try:
+ return c[k]
+ except KeyError:
+ pass # key not found
+ v = method(self, *args, **kwargs)
+ try:
+ c[k] = v
+ except ValueError:
+ pass # value too large
+ return v
+
+ else:
+
+ def wrapper(self, *args, **kwargs):
+ c = cache(self)
+ if c is None:
+ return method(self, *args, **kwargs)
+ k = key(*args, **kwargs)
+ try:
+ with lock(self):
+ return c[k]
+ except KeyError:
+ pass # key not found
+ v = method(self, *args, **kwargs)
+ # in case of a race, prefer the item already in the cache
+ try:
+ with lock(self):
+ return c.setdefault(k, v)
+ except ValueError:
+ return v # value too large
+
+ return functools.update_wrapper(wrapper, method)
+
+ return decorator
diff --git a/src/cachetools/cache.py b/src/cachetools/cache.py
new file mode 100644
index 0000000..ee5269d
--- /dev/null
+++ b/src/cachetools/cache.py
@@ -0,0 +1,9 @@
"""Deprecated compatibility shim; import ``Cache`` from :mod:`cachetools` instead."""

import warnings

from . import Cache  # noqa: F401 -- re-exported for backward compatibility

# Emitted once, the first time this module is imported.
warnings.warn(
    "cachetools.cache is deprecated, please use cachetools.Cache",
    category=DeprecationWarning,
    stacklevel=2,
)
diff --git a/src/cachetools/fifo.py b/src/cachetools/fifo.py
new file mode 100644
index 0000000..a29b789
--- /dev/null
+++ b/src/cachetools/fifo.py
@@ -0,0 +1,9 @@
"""Deprecated compatibility shim; import ``FIFOCache`` from :mod:`cachetools` instead."""

import warnings

from . import FIFOCache  # noqa: F401 -- re-exported for backward compatibility

# Emitted once, the first time this module is imported.
warnings.warn(
    "cachetools.fifo is deprecated, please use cachetools.FIFOCache",
    category=DeprecationWarning,
    stacklevel=2,
)
diff --git a/src/cachetools/func.py b/src/cachetools/func.py
new file mode 100644
index 0000000..01702c2
--- /dev/null
+++ b/src/cachetools/func.py
@@ -0,0 +1,171 @@
+"""`functools.lru_cache` compatible memoizing function decorators."""
+
+__all__ = ("fifo_cache", "lfu_cache", "lru_cache", "mru_cache", "rr_cache", "ttl_cache")
+
+import collections
+import functools
+import math
+import random
+import time
+
+try:
+ from threading import RLock
+except ImportError: # pragma: no cover
+ from dummy_threading import RLock
+
+from . import FIFOCache, LFUCache, LRUCache, MRUCache, RRCache, TTLCache
+from . import keys
+
+
+_CacheInfo = collections.namedtuple(
+ "CacheInfo", ["hits", "misses", "maxsize", "currsize"]
+)
+
+
+class _UnboundCache(dict):
+ @property
+ def maxsize(self):
+ return None
+
+ @property
+ def currsize(self):
+ return len(self)
+
+
class _UnboundTTLCache(TTLCache):
    """TTL cache with no size bound: entries expire, but are never
    evicted to make room."""

    def __init__(self, ttl, timer):
        # math.inf as maxsize makes the cache effectively unbounded
        super().__init__(math.inf, ttl, timer)

    @property
    def maxsize(self):
        # report None (like functools.lru_cache) instead of math.inf
        return None
+
+
def _cache(cache, typed):
    """Wrap `cache` into an lru_cache-compatible decorator with
    cache_info()/cache_clear()/cache_parameters() attributes."""
    # remember the decoration-time maxsize for cache_parameters();
    # the cache object may report a different maxsize in cache_info()
    maxsize = cache.maxsize

    def decorator(func):
        key = keys.typedkey if typed else keys.hashkey
        # reentrant, since user-supplied __eq__/__hash__ may re-enter
        # the wrapper while the lock is held
        lock = RLock()
        stats = [0, 0]  # [hits, misses], only mutated under `lock`

        def wrapper(*args, **kwargs):
            k = key(*args, **kwargs)
            with lock:
                try:
                    v = cache[k]
                    stats[0] += 1
                    return v
                except KeyError:
                    stats[1] += 1
            # compute outside the lock so concurrent misses are not
            # serialized on `func` itself
            v = func(*args, **kwargs)
            # in case of a race, prefer the item already in the cache
            try:
                with lock:
                    return cache.setdefault(k, v)
            except ValueError:
                return v  # value too large

        def cache_info():
            # snapshot all statistics under the lock for consistency
            with lock:
                hits, misses = stats
                maxsize = cache.maxsize
                currsize = cache.currsize
            return _CacheInfo(hits, misses, maxsize, currsize)

        def cache_clear():
            with lock:
                try:
                    cache.clear()
                finally:
                    # reset statistics even if clear() raises
                    stats[:] = [0, 0]

        wrapper.cache_info = cache_info
        wrapper.cache_clear = cache_clear
        wrapper.cache_parameters = lambda: {"maxsize": maxsize, "typed": typed}
        functools.update_wrapper(wrapper, func)
        return wrapper

    return decorator
+
+
def fifo_cache(maxsize=128, typed=False):
    """Decorator to wrap a function with a memoizing callable that saves
    up to `maxsize` results based on a First In First Out (FIFO)
    algorithm.

    """
    if callable(maxsize):
        # bare ``@fifo_cache`` usage: `maxsize` is the decorated function
        return _cache(FIFOCache(128), typed)(maxsize)
    if maxsize is None:
        return _cache(_UnboundCache(), typed)
    return _cache(FIFOCache(maxsize), typed)
+
+
def lfu_cache(maxsize=128, typed=False):
    """Decorator to wrap a function with a memoizing callable that saves
    up to `maxsize` results based on a Least Frequently Used (LFU)
    algorithm.

    """
    if callable(maxsize):
        # bare ``@lfu_cache`` usage: `maxsize` is the decorated function
        return _cache(LFUCache(128), typed)(maxsize)
    if maxsize is None:
        return _cache(_UnboundCache(), typed)
    return _cache(LFUCache(maxsize), typed)
+
+
def lru_cache(maxsize=128, typed=False):
    """Decorator to wrap a function with a memoizing callable that saves
    up to `maxsize` results based on a Least Recently Used (LRU)
    algorithm.

    """
    if callable(maxsize):
        # bare ``@lru_cache`` usage: `maxsize` is the decorated function
        return _cache(LRUCache(128), typed)(maxsize)
    if maxsize is None:
        return _cache(_UnboundCache(), typed)
    return _cache(LRUCache(maxsize), typed)
+
+
def mru_cache(maxsize=128, typed=False):
    """Decorator to wrap a function with a memoizing callable that saves
    up to `maxsize` results based on a Most Recently Used (MRU)
    algorithm.

    """
    if callable(maxsize):
        # bare ``@mru_cache`` usage: `maxsize` is the decorated function
        return _cache(MRUCache(128), typed)(maxsize)
    if maxsize is None:
        return _cache(_UnboundCache(), typed)
    return _cache(MRUCache(maxsize), typed)
+
+
def rr_cache(maxsize=128, choice=random.choice, typed=False):
    """Decorator to wrap a function with a memoizing callable that saves
    up to `maxsize` results based on a Random Replacement (RR)
    algorithm.

    """
    if callable(maxsize):
        # bare ``@rr_cache`` usage: `maxsize` is the decorated function
        return _cache(RRCache(128, choice), typed)(maxsize)
    if maxsize is None:
        return _cache(_UnboundCache(), typed)
    return _cache(RRCache(maxsize, choice), typed)
+
+
def ttl_cache(maxsize=128, ttl=600, timer=time.monotonic, typed=False):
    """Decorator to wrap a function with a memoizing callable that saves
    up to `maxsize` results based on a Least Recently Used (LRU)
    algorithm with a per-item time-to-live (TTL) value.

    """
    if callable(maxsize):
        # bare ``@ttl_cache`` usage: `maxsize` is the decorated function
        return _cache(TTLCache(128, ttl, timer), typed)(maxsize)
    if maxsize is None:
        return _cache(_UnboundTTLCache(ttl, timer), typed)
    return _cache(TTLCache(maxsize, ttl, timer), typed)
diff --git a/src/cachetools/keys.py b/src/cachetools/keys.py
new file mode 100644
index 0000000..13630a4
--- /dev/null
+++ b/src/cachetools/keys.py
@@ -0,0 +1,52 @@
+"""Key functions for memoizing decorators."""
+
+__all__ = ("hashkey", "typedkey")
+
+
class _HashedTuple(tuple):
    """A tuple that ensures that hash() will be called no more than once
    per element, since cache decorators will hash the key multiple
    times on a cache miss.  See also _HashedSeq in the standard
    library functools implementation.

    """

    # cached hash value; None until the first __hash__() call
    __hashvalue = None

    def __hash__(self, hash=tuple.__hash__):
        # `hash` is bound as a default argument to avoid a global lookup
        hashvalue = self.__hashvalue
        if hashvalue is None:
            self.__hashvalue = hashvalue = hash(self)
        return hashvalue

    def __add__(self, other, add=tuple.__add__):
        # concatenation results keep the hash-once behavior
        return _HashedTuple(add(self, other))

    def __radd__(self, other, add=tuple.__add__):
        return _HashedTuple(add(other, self))

    def __getstate__(self):
        # never pickle the cached hash value: hashes are not stable
        # across interpreter runs
        return {}
+
+
# Sentinel separating positional from keyword arguments in a key.  A
# plain tuple (not an object instance) is used so its identity is
# preserved when pickling/unpickling.
_kwmark = (_HashedTuple,)


def hashkey(*args, **kwargs):
    """Return a cache key for the specified hashable arguments."""

    if not kwargs:
        return _HashedTuple(args)
    # flatten sorted (name, value) pairs after the sentinel so keyword
    # order never affects the key
    return _HashedTuple(args + sum(sorted(kwargs.items()), _kwmark))
+
+
def typedkey(*args, **kwargs):
    """Return a typed cache key for the specified hashable arguments."""

    # append the argument types so that e.g. 1 and 1.0 produce
    # distinct keys even though they compare equal
    argtypes = tuple(type(v) for v in args)
    kwargtypes = tuple(type(v) for _, v in sorted(kwargs.items()))
    return hashkey(*args, **kwargs) + argtypes + kwargtypes
diff --git a/src/cachetools/lfu.py b/src/cachetools/lfu.py
new file mode 100644
index 0000000..5c9acef
--- /dev/null
+++ b/src/cachetools/lfu.py
@@ -0,0 +1,9 @@
"""Deprecated compatibility shim; import ``LFUCache`` from :mod:`cachetools` instead."""

import warnings

from . import LFUCache  # noqa: F401 -- re-exported for backward compatibility

# Emitted once, the first time this module is imported.
warnings.warn(
    "cachetools.lfu is deprecated, please use cachetools.LFUCache",
    category=DeprecationWarning,
    stacklevel=2,
)
diff --git a/src/cachetools/lru.py b/src/cachetools/lru.py
new file mode 100644
index 0000000..48ddb36
--- /dev/null
+++ b/src/cachetools/lru.py
@@ -0,0 +1,9 @@
"""Deprecated compatibility shim; import ``LRUCache`` from :mod:`cachetools` instead."""

import warnings

from . import LRUCache  # noqa: F401 -- re-exported for backward compatibility

# Emitted once, the first time this module is imported.
warnings.warn(
    "cachetools.lru is deprecated, please use cachetools.LRUCache",
    category=DeprecationWarning,
    stacklevel=2,
)
diff --git a/src/cachetools/mru.py b/src/cachetools/mru.py
new file mode 100644
index 0000000..a486dc4
--- /dev/null
+++ b/src/cachetools/mru.py
@@ -0,0 +1,9 @@
"""Deprecated compatibility shim; import ``MRUCache`` from :mod:`cachetools` instead."""

import warnings

from . import MRUCache  # noqa: F401 -- re-exported for backward compatibility

# Emitted once, the first time this module is imported.
warnings.warn(
    "cachetools.mru is deprecated, please use cachetools.MRUCache",
    category=DeprecationWarning,
    stacklevel=2,
)
diff --git a/src/cachetools/rr.py b/src/cachetools/rr.py
new file mode 100644
index 0000000..81331bc
--- /dev/null
+++ b/src/cachetools/rr.py
@@ -0,0 +1,9 @@
"""Deprecated compatibility shim; import ``RRCache`` from :mod:`cachetools` instead."""

import warnings

from . import RRCache  # noqa: F401 -- re-exported for backward compatibility

# Emitted once, the first time this module is imported.
warnings.warn(
    "cachetools.rr is deprecated, please use cachetools.RRCache",
    category=DeprecationWarning,
    stacklevel=2,
)
diff --git a/src/cachetools/ttl.py b/src/cachetools/ttl.py
new file mode 100644
index 0000000..ef343da
--- /dev/null
+++ b/src/cachetools/ttl.py
@@ -0,0 +1,9 @@
"""Deprecated compatibility shim; import ``TTLCache`` from :mod:`cachetools` instead."""

import warnings

from . import TTLCache  # noqa: F401 -- re-exported for backward compatibility

# Emitted once, the first time this module is imported.
warnings.warn(
    "cachetools.ttl is deprecated, please use cachetools.TTLCache",
    category=DeprecationWarning,
    stacklevel=2,
)
diff --git a/tests/__init__.py b/tests/__init__.py
new file mode 100644
index 0000000..35ac81a
--- /dev/null
+++ b/tests/__init__.py
@@ -0,0 +1,304 @@
+import sys
+import unittest
+
+
class CacheTestMixin:
    """Behavioral tests shared by all cache implementations.

    Concrete test cases mix this in and bind `Cache` to the cache
    class under test.
    """

    # cache class under test; bound by concrete subclasses
    Cache = None

    def test_defaults(self):
        cache = self.Cache(maxsize=1)
        self.assertEqual(0, len(cache))
        self.assertEqual(1, cache.maxsize)
        self.assertEqual(0, cache.currsize)
        # the default getsizeof() is expected to report 1 for any value
        self.assertEqual(1, cache.getsizeof(None))
        self.assertEqual(1, cache.getsizeof(""))
        self.assertEqual(1, cache.getsizeof(0))
        self.assertTrue(repr(cache).startswith(cache.__class__.__name__))

    def test_insert(self):
        cache = self.Cache(maxsize=2)

        cache.update({1: 1, 2: 2})
        self.assertEqual(2, len(cache))
        self.assertEqual(1, cache[1])
        self.assertEqual(2, cache[2])

        # inserting beyond maxsize evicts exactly one entry; which one
        # depends on the eviction policy under test
        cache[3] = 3
        self.assertEqual(2, len(cache))
        self.assertEqual(3, cache[3])
        self.assertTrue(1 in cache or 2 in cache)

        cache[4] = 4
        self.assertEqual(2, len(cache))
        self.assertEqual(4, cache[4])
        self.assertTrue(1 in cache or 2 in cache or 3 in cache)

    def test_update(self):
        cache = self.Cache(maxsize=2)

        cache.update({1: 1, 2: 2})
        self.assertEqual(2, len(cache))
        self.assertEqual(1, cache[1])
        self.assertEqual(2, cache[2])

        # updating existing keys must not evict anything
        cache.update({1: 1, 2: 2})
        self.assertEqual(2, len(cache))
        self.assertEqual(1, cache[1])
        self.assertEqual(2, cache[2])

        cache.update({1: "a", 2: "b"})
        self.assertEqual(2, len(cache))
        self.assertEqual("a", cache[1])
        self.assertEqual("b", cache[2])

    def test_delete(self):
        cache = self.Cache(maxsize=2)

        cache.update({1: 1, 2: 2})
        self.assertEqual(2, len(cache))
        self.assertEqual(1, cache[1])
        self.assertEqual(2, cache[2])

        del cache[2]
        self.assertEqual(1, len(cache))
        self.assertEqual(1, cache[1])
        self.assertNotIn(2, cache)

        del cache[1]
        self.assertEqual(0, len(cache))
        self.assertNotIn(1, cache)
        self.assertNotIn(2, cache)

        # deleting a missing key raises and leaves the cache untouched
        with self.assertRaises(KeyError):
            del cache[1]
        self.assertEqual(0, len(cache))
        self.assertNotIn(1, cache)
        self.assertNotIn(2, cache)

    def test_pop(self):
        cache = self.Cache(maxsize=2)

        cache.update({1: 1, 2: 2})
        self.assertEqual(2, cache.pop(2))
        self.assertEqual(1, len(cache))
        self.assertEqual(1, cache.pop(1))
        self.assertEqual(0, len(cache))

        with self.assertRaises(KeyError):
            cache.pop(2)
        with self.assertRaises(KeyError):
            cache.pop(1)
        with self.assertRaises(KeyError):
            cache.pop(0)

        # pop() with a default never raises
        self.assertEqual(None, cache.pop(2, None))
        self.assertEqual(None, cache.pop(1, None))
        self.assertEqual(None, cache.pop(0, None))

    def test_popitem(self):
        # NOTE(review): despite the name, this mostly exercises pop();
        # popitem() itself is only checked for the empty-cache KeyError.
        cache = self.Cache(maxsize=2)

        cache.update({1: 1, 2: 2})
        self.assertIn(cache.pop(1), {1: 1, 2: 2})
        self.assertEqual(1, len(cache))
        self.assertIn(cache.pop(2), {1: 1, 2: 2})
        self.assertEqual(0, len(cache))

        with self.assertRaises(KeyError):
            cache.popitem()

    @unittest.skipUnless(sys.version_info >= (3, 7), "requires Python 3.7")
    def test_popitem_exception_context(self):
        # since Python 3.7, MutableMapping.popitem() suppresses
        # exception context as implementation detail
        exception = None
        try:
            self.Cache(maxsize=2).popitem()
        except Exception as e:
            exception = e
        self.assertIsNone(exception.__cause__)
        self.assertTrue(exception.__suppress_context__)

    def test_missing(self):
        # a __missing__() hook that stores and returns the key itself
        class DefaultCache(self.Cache):
            def __missing__(self, key):
                self[key] = key
                return key

        cache = DefaultCache(maxsize=2)

        self.assertEqual(0, cache.currsize)
        self.assertEqual(2, cache.maxsize)
        self.assertEqual(0, len(cache))
        self.assertEqual(1, cache[1])
        self.assertEqual(2, cache[2])
        self.assertEqual(2, len(cache))
        self.assertTrue(1 in cache and 2 in cache)

        self.assertEqual(3, cache[3])
        self.assertEqual(2, len(cache))
        self.assertTrue(3 in cache)
        self.assertTrue(1 in cache or 2 in cache)
        self.assertTrue(1 not in cache or 2 not in cache)

        self.assertEqual(4, cache[4])
        self.assertEqual(2, len(cache))
        self.assertTrue(4 in cache)
        self.assertTrue(1 in cache or 2 in cache or 3 in cache)

        # verify __missing__() is *not* called for any operations
        # besides __getitem__()

        self.assertEqual(4, cache.get(4))
        self.assertEqual(None, cache.get(5))
        self.assertEqual(5 * 5, cache.get(5, 5 * 5))
        self.assertEqual(2, len(cache))

        self.assertEqual(4, cache.pop(4))
        with self.assertRaises(KeyError):
            cache.pop(5)
        self.assertEqual(None, cache.pop(5, None))
        self.assertEqual(5 * 5, cache.pop(5, 5 * 5))
        self.assertEqual(1, len(cache))

        cache.clear()
        cache[1] = 1 + 1
        self.assertEqual(1 + 1, cache.setdefault(1))
        self.assertEqual(1 + 1, cache.setdefault(1, 1))
        self.assertEqual(1 + 1, cache[1])
        self.assertEqual(2 + 2, cache.setdefault(2, 2 + 2))
        self.assertEqual(2 + 2, cache.setdefault(2, None))
        self.assertEqual(2 + 2, cache.setdefault(2))
        self.assertEqual(2 + 2, cache[2])
        self.assertEqual(2, len(cache))
        self.assertTrue(1 in cache and 2 in cache)
        self.assertEqual(None, cache.setdefault(3))
        self.assertEqual(2, len(cache))
        self.assertTrue(3 in cache)
        self.assertTrue(1 in cache or 2 in cache)
        self.assertTrue(1 not in cache or 2 not in cache)

    def test_missing_getsizeof(self):
        # __missing__() variant that tolerates values too large to store
        class DefaultCache(self.Cache):
            def __missing__(self, key):
                try:
                    self[key] = key
                except ValueError:
                    pass  # not stored
                return key

        cache = DefaultCache(maxsize=2, getsizeof=lambda x: x)

        self.assertEqual(0, cache.currsize)
        self.assertEqual(2, cache.maxsize)

        self.assertEqual(1, cache[1])
        self.assertEqual(1, len(cache))
        self.assertEqual(1, cache.currsize)
        self.assertIn(1, cache)

        self.assertEqual(2, cache[2])
        self.assertEqual(1, len(cache))
        self.assertEqual(2, cache.currsize)
        self.assertNotIn(1, cache)
        self.assertIn(2, cache)

        self.assertEqual(3, cache[3])  # not stored
        self.assertEqual(1, len(cache))
        self.assertEqual(2, cache.currsize)
        self.assertEqual(1, cache[1])
        self.assertEqual(1, len(cache))
        self.assertEqual(1, cache.currsize)
        self.assertEqual((1, 1), cache.popitem())

    def _test_getsizeof(self, cache):
        # shared checks for a cache whose getsizeof() is the identity
        self.assertEqual(0, cache.currsize)
        self.assertEqual(3, cache.maxsize)
        self.assertEqual(1, cache.getsizeof(1))
        self.assertEqual(2, cache.getsizeof(2))
        self.assertEqual(3, cache.getsizeof(3))

        cache.update({1: 1, 2: 2})
        self.assertEqual(2, len(cache))
        self.assertEqual(3, cache.currsize)
        self.assertEqual(1, cache[1])
        self.assertEqual(2, cache[2])

        cache[1] = 2
        self.assertEqual(1, len(cache))
        self.assertEqual(2, cache.currsize)
        self.assertEqual(2, cache[1])
        self.assertNotIn(2, cache)

        cache.update({1: 1, 2: 2})
        self.assertEqual(2, len(cache))
        self.assertEqual(3, cache.currsize)
        self.assertEqual(1, cache[1])
        self.assertEqual(2, cache[2])

        cache[3] = 3
        self.assertEqual(1, len(cache))
        self.assertEqual(3, cache.currsize)
        self.assertEqual(3, cache[3])
        self.assertNotIn(1, cache)
        self.assertNotIn(2, cache)

        # storing a value larger than maxsize must raise ...
        with self.assertRaises(ValueError):
            cache[3] = 4
        # ... and leave the cache unchanged
        self.assertEqual(1, len(cache))
        self.assertEqual(3, cache.currsize)
        self.assertEqual(3, cache[3])

        with self.assertRaises(ValueError):
            cache[4] = 4
        self.assertEqual(1, len(cache))
        self.assertEqual(3, cache.currsize)
        self.assertEqual(3, cache[3])

    def test_getsizeof_param(self):
        self._test_getsizeof(self.Cache(maxsize=3, getsizeof=lambda x: x))

    def test_getsizeof_subclass(self):
        class Cache(self.Cache):
            def getsizeof(self, value):
                return value

        self._test_getsizeof(Cache(maxsize=3))

    def test_pickle(self):
        import pickle

        source = self.Cache(maxsize=2)
        source.update({1: 1, 2: 2})

        # a pickle round-trip must preserve contents ...
        cache = pickle.loads(pickle.dumps(source))
        self.assertEqual(source, cache)

        self.assertEqual(2, len(cache))
        self.assertEqual(1, cache[1])
        self.assertEqual(2, cache[2])

        # ... and the unpickled cache must still evict correctly
        cache[3] = 3
        self.assertEqual(2, len(cache))
        self.assertEqual(3, cache[3])
        self.assertTrue(1 in cache or 2 in cache)

        cache[4] = 4
        self.assertEqual(2, len(cache))
        self.assertEqual(4, cache[4])
        self.assertTrue(1 in cache or 2 in cache or 3 in cache)

        self.assertEqual(cache, pickle.loads(pickle.dumps(cache)))

    def test_pickle_maxsize(self):
        import pickle
        import sys

        # test empty cache, single element, large cache (recursion limit)
        for n in [0, 1, sys.getrecursionlimit() * 2]:
            source = self.Cache(maxsize=n)
            source.update((i, i) for i in range(n))
            cache = pickle.loads(pickle.dumps(source))
            self.assertEqual(n, len(cache))
            self.assertEqual(source, cache)
diff --git a/tests/test_cache.py b/tests/test_cache.py
new file mode 100644
index 0000000..ef87877
--- /dev/null
+++ b/tests/test_cache.py
@@ -0,0 +1,10 @@
+import unittest
+
+import cachetools
+
+from . import CacheTestMixin
+
+
class CacheTest(unittest.TestCase, CacheTestMixin):
    """Runs the shared cache tests against the base cachetools.Cache."""

    Cache = cachetools.Cache
diff --git a/tests/test_deprecated.py b/tests/test_deprecated.py
new file mode 100644
index 0000000..6510c82
--- /dev/null
+++ b/tests/test_deprecated.py
@@ -0,0 +1,67 @@
+import unittest
+import warnings
+
+
class DeprecatedTest(unittest.TestCase):
    """Verify the deprecated per-class modules still import and emit a
    DeprecationWarning naming the old module.

    Each test imports a different submodule, so every module's
    import-time warning fires exactly once per test run.  Assertions use
    unittest methods instead of bare ``assert`` statements so they are
    not stripped under ``python -O`` and report useful messages.
    """

    def _check_deprecation(self, w, name):
        """Assert exactly one DeprecationWarning mentioning `name` was caught."""
        self.assertEqual(len(w), 1)
        self.assertTrue(issubclass(w[-1].category, DeprecationWarning))
        self.assertIn(name, str(w[-1].message))

    def test_cache(self):
        with warnings.catch_warnings(record=True) as w:
            warnings.simplefilter("always")
            from cachetools.cache import Cache  # noqa: F401 -- import fires warning

            self._check_deprecation(w, "cachetools.cache")

    def test_fifo(self):
        with warnings.catch_warnings(record=True) as w:
            warnings.simplefilter("always")
            from cachetools.fifo import FIFOCache  # noqa: F401 -- import fires warning

            self._check_deprecation(w, "cachetools.fifo")

    def test_lfu(self):
        with warnings.catch_warnings(record=True) as w:
            warnings.simplefilter("always")
            from cachetools.lfu import LFUCache  # noqa: F401 -- import fires warning

            self._check_deprecation(w, "cachetools.lfu")

    def test_lru(self):
        with warnings.catch_warnings(record=True) as w:
            warnings.simplefilter("always")
            from cachetools.lru import LRUCache  # noqa: F401 -- import fires warning

            self._check_deprecation(w, "cachetools.lru")

    def test_mru(self):
        with warnings.catch_warnings(record=True) as w:
            warnings.simplefilter("always")
            from cachetools.mru import MRUCache  # noqa: F401 -- import fires warning

            self._check_deprecation(w, "cachetools.mru")

    def test_rr(self):
        with warnings.catch_warnings(record=True) as w:
            warnings.simplefilter("always")
            from cachetools.rr import RRCache  # noqa: F401 -- import fires warning

            self._check_deprecation(w, "cachetools.rr")

    def test_ttl(self):
        with warnings.catch_warnings(record=True) as w:
            warnings.simplefilter("always")
            from cachetools.ttl import TTLCache  # noqa: F401 -- import fires warning

            self._check_deprecation(w, "cachetools.ttl")
diff --git a/tests/test_fifo.py b/tests/test_fifo.py
new file mode 100644
index 0000000..933af56
--- /dev/null
+++ b/tests/test_fifo.py
@@ -0,0 +1,57 @@
+import unittest
+
+from cachetools import FIFOCache
+
+from . import CacheTestMixin
+
+
class FIFOCacheTest(unittest.TestCase, CacheTestMixin):
    """FIFO cache tests.

    Renamed from ``LRUCacheTest`` (a copy-paste leftover from
    test_lru.py): this module exercises FIFOCache, and the old name
    misidentified the unit under test and duplicated the real
    LRUCacheTest in test_lru.py.
    """

    Cache = FIFOCache

    def test_fifo(self):
        cache = FIFOCache(maxsize=2)

        cache[1] = 1
        cache[2] = 2
        cache[3] = 3

        # insertion order governs eviction: 1 (oldest) is gone
        self.assertEqual(len(cache), 2)
        self.assertEqual(cache[2], 2)
        self.assertEqual(cache[3], 3)
        self.assertNotIn(1, cache)

        # unlike LRU, accessing an entry must NOT protect it:
        # 2 is still the oldest and is evicted next
        cache[2]
        cache[4] = 4
        self.assertEqual(len(cache), 2)
        self.assertEqual(cache[3], 3)
        self.assertEqual(cache[4], 4)
        self.assertNotIn(2, cache)

        cache[5] = 5
        self.assertEqual(len(cache), 2)
        self.assertEqual(cache[4], 4)
        self.assertEqual(cache[5], 5)
        self.assertNotIn(3, cache)

    def test_fifo_getsizeof(self):
        cache = FIFOCache(maxsize=3, getsizeof=lambda x: x)

        cache[1] = 1
        cache[2] = 2

        self.assertEqual(len(cache), 2)
        self.assertEqual(cache[1], 1)
        self.assertEqual(cache[2], 2)

        # size-3 value forces both smaller entries out
        cache[3] = 3

        self.assertEqual(len(cache), 1)
        self.assertEqual(cache[3], 3)
        self.assertNotIn(1, cache)
        self.assertNotIn(2, cache)

        # a value larger than maxsize is rejected, cache unchanged
        with self.assertRaises(ValueError):
            cache[4] = 4
        self.assertEqual(len(cache), 1)
        self.assertEqual(cache[3], 3)
diff --git a/tests/test_func.py b/tests/test_func.py
new file mode 100644
index 0000000..72e7589
--- /dev/null
+++ b/tests/test_func.py
@@ -0,0 +1,129 @@
+import unittest
+
+import cachetools.func
+
+
class DecoratorTestMixin:
    """Shared tests for the lru_cache-style decorators in
    cachetools.func; concrete subclasses bind `DECORATOR`.

    cache_info() tuples below read as (hits, misses, maxsize, currsize).
    """

    def decorator(self, maxsize, **kwargs):
        return self.DECORATOR(maxsize, **kwargs)

    def test_decorator(self):
        cached = self.decorator(maxsize=2)(lambda n: n)
        self.assertEqual(cached.cache_parameters(), {"maxsize": 2, "typed": False})
        self.assertEqual(cached.cache_info(), (0, 0, 2, 0))
        self.assertEqual(cached(1), 1)
        self.assertEqual(cached.cache_info(), (0, 1, 2, 1))
        self.assertEqual(cached(1), 1)
        self.assertEqual(cached.cache_info(), (1, 1, 2, 1))
        # untyped: 1.0 hits the entry cached for 1
        self.assertEqual(cached(1.0), 1.0)
        self.assertEqual(cached.cache_info(), (2, 1, 2, 1))

    def test_decorator_clear(self):
        cached = self.decorator(maxsize=2)(lambda n: n)
        self.assertEqual(cached.cache_parameters(), {"maxsize": 2, "typed": False})
        self.assertEqual(cached.cache_info(), (0, 0, 2, 0))
        self.assertEqual(cached(1), 1)
        self.assertEqual(cached.cache_info(), (0, 1, 2, 1))
        # cache_clear() resets both the cache and the hit/miss counters
        cached.cache_clear()
        self.assertEqual(cached.cache_info(), (0, 0, 2, 0))
        self.assertEqual(cached(1), 1)
        self.assertEqual(cached.cache_info(), (0, 1, 2, 1))

    def test_decorator_nocache(self):
        # maxsize=0: every call is a miss, nothing is ever stored
        cached = self.decorator(maxsize=0)(lambda n: n)
        self.assertEqual(cached.cache_parameters(), {"maxsize": 0, "typed": False})
        self.assertEqual(cached.cache_info(), (0, 0, 0, 0))
        self.assertEqual(cached(1), 1)
        self.assertEqual(cached.cache_info(), (0, 1, 0, 0))
        self.assertEqual(cached(1), 1)
        self.assertEqual(cached.cache_info(), (0, 2, 0, 0))
        self.assertEqual(cached(1.0), 1.0)
        self.assertEqual(cached.cache_info(), (0, 3, 0, 0))

    def test_decorator_unbound(self):
        # maxsize=None: unbounded cache, maxsize reported as None
        cached = self.decorator(maxsize=None)(lambda n: n)
        self.assertEqual(cached.cache_parameters(), {"maxsize": None, "typed": False})
        self.assertEqual(cached.cache_info(), (0, 0, None, 0))
        self.assertEqual(cached(1), 1)
        self.assertEqual(cached.cache_info(), (0, 1, None, 1))
        self.assertEqual(cached(1), 1)
        self.assertEqual(cached.cache_info(), (1, 1, None, 1))
        self.assertEqual(cached(1.0), 1.0)
        self.assertEqual(cached.cache_info(), (2, 1, None, 1))

    def test_decorator_typed(self):
        # typed=True: 1 and 1.0 get distinct cache entries
        cached = self.decorator(maxsize=2, typed=True)(lambda n: n)
        self.assertEqual(cached.cache_parameters(), {"maxsize": 2, "typed": True})
        self.assertEqual(cached.cache_info(), (0, 0, 2, 0))
        self.assertEqual(cached(1), 1)
        self.assertEqual(cached.cache_info(), (0, 1, 2, 1))
        self.assertEqual(cached(1), 1)
        self.assertEqual(cached.cache_info(), (1, 1, 2, 1))
        self.assertEqual(cached(1.0), 1.0)
        self.assertEqual(cached.cache_info(), (1, 2, 2, 2))
        self.assertEqual(cached(1.0), 1.0)
        self.assertEqual(cached.cache_info(), (2, 2, 2, 2))

    def test_decorator_user_function(self):
        # bare usage (no parentheses) must default to maxsize=128
        cached = self.decorator(lambda n: n)
        self.assertEqual(cached.cache_parameters(), {"maxsize": 128, "typed": False})
        self.assertEqual(cached.cache_info(), (0, 0, 128, 0))
        self.assertEqual(cached(1), 1)
        self.assertEqual(cached.cache_info(), (0, 1, 128, 1))
        self.assertEqual(cached(1), 1)
        self.assertEqual(cached.cache_info(), (1, 1, 128, 1))
        self.assertEqual(cached(1.0), 1.0)
        self.assertEqual(cached.cache_info(), (2, 1, 128, 1))

    def test_decorator_needs_rlock(self):
        cached = self.decorator(lambda n: n)

        # key type whose __eq__ re-enters the cached function
        class RecursiveEquals:
            def __init__(self, use_cache):
                self._use_cache = use_cache

            def __hash__(self):
                return hash(self._use_cache)

            def __eq__(self, other):
                if self._use_cache:
                    # This call will happen while the cache-lock is held,
                    # requiring a reentrant lock to avoid deadlock.
                    cached(self)
                return self._use_cache == other._use_cache

        # Prime the cache.
        cached(RecursiveEquals(False))
        cached(RecursiveEquals(True))
        # Then do a call which will cause a deadlock with a non-reentrant lock.
        self.assertEqual(cached(RecursiveEquals(True)), RecursiveEquals(True))
+
+
class FIFODecoratorTest(unittest.TestCase, DecoratorTestMixin):
    """Runs the shared decorator tests against cachetools.func.fifo_cache."""

    DECORATOR = staticmethod(cachetools.func.fifo_cache)
+
+
class LFUDecoratorTest(unittest.TestCase, DecoratorTestMixin):
    """Runs the shared decorator tests against cachetools.func.lfu_cache."""

    DECORATOR = staticmethod(cachetools.func.lfu_cache)
+
+
class LRUDecoratorTest(unittest.TestCase, DecoratorTestMixin):
    """Runs the shared decorator tests against cachetools.func.lru_cache."""

    DECORATOR = staticmethod(cachetools.func.lru_cache)
+
+
class MRUDecoratorTest(unittest.TestCase, DecoratorTestMixin):
    """Runs the shared decorator tests against cachetools.func.mru_cache."""

    DECORATOR = staticmethod(cachetools.func.mru_cache)
+
+
class RRDecoratorTest(unittest.TestCase, DecoratorTestMixin):
    """Runs the shared decorator tests against cachetools.func.rr_cache."""

    DECORATOR = staticmethod(cachetools.func.rr_cache)
+
+
class TTLDecoratorTest(unittest.TestCase, DecoratorTestMixin):
    """Runs the shared decorator tests against cachetools.func.ttl_cache."""

    DECORATOR = staticmethod(cachetools.func.ttl_cache)
diff --git a/tests/test_keys.py b/tests/test_keys.py
new file mode 100644
index 0000000..892a620
--- /dev/null
+++ b/tests/test_keys.py
@@ -0,0 +1,57 @@
+import unittest
+
+import cachetools.keys
+
+
class CacheKeysTest(unittest.TestCase):
    """Tests for the hashkey/typedkey key functions."""

    def test_hashkey(self, key=cachetools.keys.hashkey):
        self.assertEqual(key(), key())
        self.assertEqual(hash(key()), hash(key()))
        self.assertEqual(key(1, 2, 3), key(1, 2, 3))
        self.assertEqual(hash(key(1, 2, 3)), hash(key(1, 2, 3)))
        self.assertEqual(key(1, 2, 3, x=0), key(1, 2, 3, x=0))
        self.assertEqual(hash(key(1, 2, 3, x=0)), hash(key(1, 2, 3, x=0)))
        self.assertNotEqual(key(1, 2, 3), key(3, 2, 1))
        self.assertNotEqual(key(1, 2, 3), key(1, 2, 3, x=None))
        self.assertNotEqual(key(1, 2, 3, x=0), key(1, 2, 3, x=None))
        self.assertNotEqual(key(1, 2, 3, x=0), key(1, 2, 3, y=0))
        # keys built from unhashable arguments fail only when hashed
        with self.assertRaises(TypeError):
            hash(key({}))
        # untyped keys compare equal
        self.assertEqual(key(1, 2, 3), key(1.0, 2.0, 3.0))
        self.assertEqual(hash(key(1, 2, 3)), hash(key(1.0, 2.0, 3.0)))

    def test_typedkey(self, key=cachetools.keys.typedkey):
        self.assertEqual(key(), key())
        self.assertEqual(hash(key()), hash(key()))
        self.assertEqual(key(1, 2, 3), key(1, 2, 3))
        self.assertEqual(hash(key(1, 2, 3)), hash(key(1, 2, 3)))
        self.assertEqual(key(1, 2, 3, x=0), key(1, 2, 3, x=0))
        self.assertEqual(hash(key(1, 2, 3, x=0)), hash(key(1, 2, 3, x=0)))
        self.assertNotEqual(key(1, 2, 3), key(3, 2, 1))
        self.assertNotEqual(key(1, 2, 3), key(1, 2, 3, x=None))
        self.assertNotEqual(key(1, 2, 3, x=0), key(1, 2, 3, x=None))
        self.assertNotEqual(key(1, 2, 3, x=0), key(1, 2, 3, y=0))
        with self.assertRaises(TypeError):
            hash(key({}))
        # typed keys compare unequal
        self.assertNotEqual(key(1, 2, 3), key(1.0, 2.0, 3.0))

    def test_addkeys(self, key=cachetools.keys.hashkey):
        # concatenation with plain tuples preserves the key type
        self.assertIsInstance(key(), tuple)
        self.assertIsInstance(key(1, 2, 3) + key(4, 5, 6), type(key()))
        self.assertIsInstance(key(1, 2, 3) + (4, 5, 6), type(key()))
        self.assertIsInstance((1, 2, 3) + key(4, 5, 6), type(key()))

    def test_pickle(self, key=cachetools.keys.hashkey):
        import pickle

        for k in [key(), key("abc"), key("abc", 123), key("abc", q="abc")]:
            # white-box test: assert cached hash value is not pickled
            self.assertEqual(len(k.__dict__), 0)
            h = hash(k)
            self.assertEqual(len(k.__dict__), 1)
            pickled = pickle.loads(pickle.dumps(k))
            self.assertEqual(len(pickled.__dict__), 0)
            self.assertEqual(k, pickled)
            self.assertEqual(h, hash(pickled))
diff --git a/tests/test_lfu.py b/tests/test_lfu.py
new file mode 100644
index 0000000..6679a88
--- /dev/null
+++ b/tests/test_lfu.py
@@ -0,0 +1,50 @@
+import unittest
+
+from cachetools import LFUCache
+
+from . import CacheTestMixin
+
+
class LFUCacheTest(unittest.TestCase, CacheTestMixin):
    """LFU cache tests: least-frequently-used entries are evicted first."""

    Cache = LFUCache

    def test_lfu(self):
        cache = LFUCache(maxsize=2)

        cache[1] = 1
        cache[1]  # bump 1's use count so it outranks later entries
        cache[2] = 2
        cache[3] = 3

        # one of the never-read entries (2 or 3) was evicted, not 1
        self.assertEqual(len(cache), 2)
        self.assertEqual(cache[1], 1)
        self.assertTrue(2 in cache or 3 in cache)
        self.assertTrue(2 not in cache or 3 not in cache)

        cache[4] = 4
        self.assertEqual(len(cache), 2)
        self.assertEqual(cache[4], 4)
        self.assertEqual(cache[1], 1)

    def test_lfu_getsizeof(self):
        cache = LFUCache(maxsize=3, getsizeof=lambda x: x)

        cache[1] = 1
        cache[2] = 2

        self.assertEqual(len(cache), 2)
        self.assertEqual(cache[1], 1)
        self.assertEqual(cache[2], 2)

        # size-3 value forces both smaller entries out
        cache[3] = 3

        self.assertEqual(len(cache), 1)
        self.assertEqual(cache[3], 3)
        self.assertNotIn(1, cache)
        self.assertNotIn(2, cache)

        # a value larger than maxsize is rejected, cache unchanged
        with self.assertRaises(ValueError):
            cache[4] = 4
        self.assertEqual(len(cache), 1)
        self.assertEqual(cache[3], 3)
diff --git a/tests/test_lru.py b/tests/test_lru.py
new file mode 100644
index 0000000..fe97803
--- /dev/null
+++ b/tests/test_lru.py
@@ -0,0 +1,57 @@
+import unittest
+
+from cachetools import LRUCache
+
+from . import CacheTestMixin
+
+
class LRUCacheTest(unittest.TestCase, CacheTestMixin):
    """LRU cache tests: least-recently-used entries are evicted first."""

    Cache = LRUCache

    def test_lru(self):
        cache = LRUCache(maxsize=2)

        cache[1] = 1
        cache[2] = 2
        cache[3] = 3

        self.assertEqual(len(cache), 2)
        self.assertEqual(cache[2], 2)
        self.assertEqual(cache[3], 3)
        self.assertNotIn(1, cache)

        cache[2]  # touch 2 so it becomes most recently used
        cache[4] = 4
        # 3 is now the least recently used entry and gets evicted
        self.assertEqual(len(cache), 2)
        self.assertEqual(cache[2], 2)
        self.assertEqual(cache[4], 4)
        self.assertNotIn(3, cache)

        cache[5] = 5
        self.assertEqual(len(cache), 2)
        self.assertEqual(cache[4], 4)
        self.assertEqual(cache[5], 5)
        self.assertNotIn(2, cache)

    def test_lru_getsizeof(self):
        cache = LRUCache(maxsize=3, getsizeof=lambda x: x)

        cache[1] = 1
        cache[2] = 2

        self.assertEqual(len(cache), 2)
        self.assertEqual(cache[1], 1)
        self.assertEqual(cache[2], 2)

        # size-3 value forces both smaller entries out
        cache[3] = 3

        self.assertEqual(len(cache), 1)
        self.assertEqual(cache[3], 3)
        self.assertNotIn(1, cache)
        self.assertNotIn(2, cache)

        # a value larger than maxsize is rejected, cache unchanged
        with self.assertRaises(ValueError):
            cache[4] = 4
        self.assertEqual(len(cache), 1)
        self.assertEqual(cache[3], 3)
diff --git a/tests/test_method.py b/tests/test_method.py
new file mode 100644
index 0000000..b41dac0
--- /dev/null
+++ b/tests/test_method.py
@@ -0,0 +1,165 @@
+import operator
+import unittest
+
+from cachetools import LRUCache, cachedmethod, keys
+
+
+class Cached:  # helper whose methods count real calls, making cache hits observable
+    def __init__(self, cache, count=0):
+        self.cache = cache
+        self.count = count
+
+    @cachedmethod(operator.attrgetter("cache"))
+    def get(self, value):
+        count = self.count  # returns the pre-increment count: changes on every real call
+        self.count += 1
+        return count
+
+    @cachedmethod(operator.attrgetter("cache"), key=keys.typedkey)
+    def get_typed(self, value):  # like get(), but typedkey caches 1 and 1.0 separately
+        count = self.count
+        self.count += 1
+        return count
+
+    # https://github.com/tkem/cachetools/issues/107
+    def __hash__(self):  # instances need not be hashable for cachedmethod to work
+        raise TypeError("unhashable type")
+
+
+class Locked:  # acts as its own context-manager "lock"; count tracks acquisitions
+    def __init__(self, cache):
+        self.cache = cache
+        self.count = 0
+
+    @cachedmethod(operator.attrgetter("cache"), lock=lambda self: self)
+    def get(self, value):
+        return self.count  # number of times the lock was entered so far
+
+    def __enter__(self):
+        self.count += 1  # each lock acquisition is counted
+
+    def __exit__(self, *exc):
+        pass
+
+
+class CachedMethodTest(unittest.TestCase):
+    def test_dict(self):  # plain dict as backing cache; 1 and 1.0 share a key
+        cached = Cached({})
+
+        self.assertEqual(cached.get(0), 0)
+        self.assertEqual(cached.get(1), 1)
+        self.assertEqual(cached.get(1), 1)  # cache hit: count unchanged
+        self.assertEqual(cached.get(1.0), 1)  # 1.0 == 1, so same cache entry
+        self.assertEqual(cached.get(1.0), 1)
+
+        cached.cache.clear()  # after clearing, the next call recomputes
+        self.assertEqual(cached.get(1), 2)
+
+    def test_typed_dict(self):  # typedkey: int and float arguments cache separately
+        cached = Cached(LRUCache(maxsize=2))
+
+        self.assertEqual(cached.get_typed(0), 0)
+        self.assertEqual(cached.get_typed(1), 1)
+        self.assertEqual(cached.get_typed(1), 1)
+        self.assertEqual(cached.get_typed(1.0), 2)  # float key is distinct from int
+        self.assertEqual(cached.get_typed(1.0), 2)
+        self.assertEqual(cached.get_typed(0.0), 3)
+        self.assertEqual(cached.get_typed(0), 4)  # entry for int 0 was evicted (maxsize=2)
+
+    def test_lru(self):  # same expectations as test_dict, with an LRUCache backend
+        cached = Cached(LRUCache(maxsize=2))
+
+        self.assertEqual(cached.get(0), 0)
+        self.assertEqual(cached.get(1), 1)
+        self.assertEqual(cached.get(1), 1)
+        self.assertEqual(cached.get(1.0), 1)
+        self.assertEqual(cached.get(1.0), 1)
+
+        cached.cache.clear()
+        self.assertEqual(cached.get(1), 2)
+
+    def test_typed_lru(self):  # same expectations as test_typed_dict, LRU backend
+        cached = Cached(LRUCache(maxsize=2))
+
+        self.assertEqual(cached.get_typed(0), 0)
+        self.assertEqual(cached.get_typed(1), 1)
+        self.assertEqual(cached.get_typed(1), 1)
+        self.assertEqual(cached.get_typed(1.0), 2)
+        self.assertEqual(cached.get_typed(1.0), 2)
+        self.assertEqual(cached.get_typed(0.0), 3)
+        self.assertEqual(cached.get_typed(0), 4)
+
+    def test_nospace(self):  # zero-size cache: nothing is stored, every call recomputes
+        cached = Cached(LRUCache(maxsize=0))
+
+        self.assertEqual(cached.get(0), 0)
+        self.assertEqual(cached.get(1), 1)
+        self.assertEqual(cached.get(1), 2)
+        self.assertEqual(cached.get(1.0), 3)
+        self.assertEqual(cached.get(1.0), 4)
+
+    def test_nocache(self):  # cache=None disables caching entirely
+        cached = Cached(None)
+
+        self.assertEqual(cached.get(0), 0)
+        self.assertEqual(cached.get(1), 1)
+        self.assertEqual(cached.get(1), 2)
+        self.assertEqual(cached.get(1.0), 3)
+        self.assertEqual(cached.get(1.0), 4)
+
+    def test_weakref(self):  # weak-value cache: entries vanish once unreferenced
+        import weakref
+        import fractions
+        import gc
+
+        # in Python 3.4, `int` does not support weak references even
+        # when subclassed, but Fraction apparently does...
+        class Int(fractions.Fraction):
+            def __add__(self, other):
+                return Int(fractions.Fraction.__add__(self, other))
+
+        cached = Cached(weakref.WeakValueDictionary(), count=Int(0))
+
+        self.assertEqual(cached.get(0), 0)
+        gc.collect()  # no strong reference kept, so the weak entry is collected
+        self.assertEqual(cached.get(0), 1)  # recomputed after collection
+
+        ref = cached.get(1)  # keep a strong reference so the entry survives
+        self.assertEqual(ref, 2)
+        self.assertEqual(cached.get(1), 2)
+        self.assertEqual(cached.get(1.0), 2)
+
+        ref = cached.get_typed(1)
+        self.assertEqual(ref, 3)
+        self.assertEqual(cached.get_typed(1), 3)
+        self.assertEqual(cached.get_typed(1.0), 4)
+
+        cached.cache.clear()
+        self.assertEqual(cached.get(1), 5)
+
+    def test_locked_dict(self):  # observed counts imply two acquisitions per miss, one per hit
+        cached = Locked({})
+
+        self.assertEqual(cached.get(0), 1)
+        self.assertEqual(cached.get(1), 3)
+        self.assertEqual(cached.get(1), 3)  # hit: cached value from the earlier miss
+        self.assertEqual(cached.get(1.0), 3)
+        self.assertEqual(cached.get(2.0), 7)
+
+    def test_locked_nocache(self):  # no cache: the lock is never acquired
+        cached = Locked(None)
+
+        self.assertEqual(cached.get(0), 0)
+        self.assertEqual(cached.get(1), 0)
+        self.assertEqual(cached.get(1), 0)
+        self.assertEqual(cached.get(1.0), 0)
+        self.assertEqual(cached.get(1.0), 0)
+
+    def test_locked_nospace(self):  # zero-size cache: every call misses, count grows by 2
+        cached = Locked(LRUCache(maxsize=0))
+
+        self.assertEqual(cached.get(0), 1)
+        self.assertEqual(cached.get(1), 3)
+        self.assertEqual(cached.get(1), 5)
+        self.assertEqual(cached.get(1.0), 7)
+        self.assertEqual(cached.get(1.0), 9)
diff --git a/tests/test_mru.py b/tests/test_mru.py
new file mode 100644
index 0000000..d11dba4
--- /dev/null
+++ b/tests/test_mru.py
@@ -0,0 +1,50 @@
+import unittest
+
+from cachetools import MRUCache
+
+from . import CacheTestMixin
+
+
+class MRUCacheTest(unittest.TestCase, CacheTestMixin):
+
+    Cache = MRUCache  # cache class exercised by the shared CacheTestMixin tests
+
+    def test_evict__writes_only(self):  # no reads yet: eviction falls back to insertion order
+        cache = MRUCache(maxsize=2)
+
+        cache[1] = 1
+        cache[2] = 2
+        cache[3] = 3  # Evicts 1 because nothing's been used yet
+
+        assert len(cache) == 2
+        assert 1 not in cache, "Wrong key was evicted. Should have been '1'."
+        assert 2 in cache
+        assert 3 in cache
+
+    def test_evict__with_access(self):  # reads update most-recently-used tracking
+        cache = MRUCache(maxsize=2)
+
+        cache[1] = 1
+        cache[2] = 2
+        cache[1]
+        cache[2]  # 2 is now the most recently used
+        cache[3] = 3  # Evicts 2
+        assert 2 not in cache, "Wrong key was evicted. Should have been '2'."
+        assert 1 in cache
+        assert 3 in cache
+
+    def test_evict__with_delete(self):  # explicit deletion frees space without eviction
+        cache = MRUCache(maxsize=2)
+
+        cache[1] = 1
+        cache[2] = 2
+        del cache[2]
+        cache[3] = 3  # Doesn't evict anything because we just deleted 2
+
+        assert 2 not in cache
+        assert 1 in cache
+
+        cache[4] = 4  # Should evict 1 as we just accessed it with __contains__
+        assert 1 not in cache
+        assert 3 in cache
+        assert 4 in cache
diff --git a/tests/test_rr.py b/tests/test_rr.py
new file mode 100644
index 0000000..008978b
--- /dev/null
+++ b/tests/test_rr.py
@@ -0,0 +1,35 @@
+import unittest
+
+from cachetools import RRCache
+
+from . import CacheTestMixin
+
+
+class RRCacheTest(unittest.TestCase, CacheTestMixin):
+
+    Cache = RRCache  # cache class exercised by the shared CacheTestMixin tests
+
+    def test_rr(self):  # choice=min makes the "random" eviction deterministic
+        cache = RRCache(maxsize=2, choice=min)
+        self.assertEqual(min, cache.choice)  # the choice function is exposed
+
+        cache[1] = 1
+        cache[2] = 2
+        cache[3] = 3  # evicts min(1, 2) == 1
+
+        self.assertEqual(2, len(cache))
+        self.assertEqual(2, cache[2])
+        self.assertEqual(3, cache[3])
+        self.assertNotIn(1, cache)
+
+        cache[0] = 0  # evicts min(2, 3) == 2
+        self.assertEqual(2, len(cache))
+        self.assertEqual(0, cache[0])
+        self.assertEqual(3, cache[3])
+        self.assertNotIn(2, cache)
+
+        cache[4] = 4  # evicts min(0, 3) == 0
+        self.assertEqual(2, len(cache))
+        self.assertEqual(3, cache[3])
+        self.assertEqual(4, cache[4])
+        self.assertNotIn(0, cache)
diff --git a/tests/test_ttl.py b/tests/test_ttl.py
new file mode 100644
index 0000000..6e51a59
--- /dev/null
+++ b/tests/test_ttl.py
@@ -0,0 +1,196 @@
+import unittest
+
+from cachetools import TTLCache
+
+from . import CacheTestMixin
+
+
+class Timer:  # deterministic fake clock; advances via tick(), or per reading if auto
+    def __init__(self, auto=False):
+        self.auto = auto
+        self.time = 0
+
+    def __call__(self):
+        if self.auto:
+            self.time += 1  # auto mode: every reading advances the clock by one
+        return self.time
+
+    def tick(self):
+        self.time += 1
+
+
+class TTLTestCache(TTLCache):  # TTLCache preconfigured with the fake Timer for the mixin tests
+    def __init__(self, maxsize, ttl=0, **kwargs):
+        TTLCache.__init__(self, maxsize, ttl=ttl, timer=Timer(), **kwargs)
+
+
+class TTLCacheTest(unittest.TestCase, CacheTestMixin):
+
+    Cache = TTLTestCache  # cache class exercised by the shared CacheTestMixin tests
+
+    def test_ttl(self):  # entries become unavailable once their ttl has passed
+        cache = TTLCache(maxsize=2, ttl=1, timer=Timer())
+        self.assertEqual(0, cache.timer())
+        self.assertEqual(1, cache.ttl)
+
+        cache[1] = 1  # inserted at time 0
+        self.assertEqual({1}, set(cache))
+        self.assertEqual(1, len(cache))
+        self.assertEqual(1, cache[1])
+
+        cache.timer.tick()  # time 1: item 1 still within its ttl
+        self.assertEqual({1}, set(cache))
+        self.assertEqual(1, len(cache))
+        self.assertEqual(1, cache[1])
+
+        cache[2] = 2  # inserted at time 1
+        self.assertEqual({1, 2}, set(cache))
+        self.assertEqual(2, len(cache))
+        self.assertEqual(1, cache[1])
+        self.assertEqual(2, cache[2])
+
+        cache.timer.tick()  # time 2: item 1 has now expired
+        self.assertEqual({2}, set(cache))
+        self.assertEqual(1, len(cache))
+        self.assertNotIn(1, cache)
+        self.assertEqual(2, cache[2])
+
+        cache[3] = 3  # inserted at time 2
+        self.assertEqual({2, 3}, set(cache))
+        self.assertEqual(2, len(cache))
+        self.assertNotIn(1, cache)
+        self.assertEqual(2, cache[2])
+        self.assertEqual(3, cache[3])
+
+        cache.timer.tick()  # time 3: item 2 expires
+        self.assertEqual({3}, set(cache))
+        self.assertEqual(1, len(cache))
+        self.assertNotIn(1, cache)
+        self.assertNotIn(2, cache)
+        self.assertEqual(3, cache[3])
+
+        cache.timer.tick()  # time 4: item 3 expires, cache is empty
+        self.assertEqual(set(), set(cache))
+        self.assertEqual(0, len(cache))
+        self.assertNotIn(1, cache)
+        self.assertNotIn(2, cache)
+        self.assertNotIn(3, cache)
+
+        with self.assertRaises(KeyError):  # expired keys behave as missing
+            del cache[1]
+        with self.assertRaises(KeyError):
+            cache.pop(2)
+        with self.assertRaises(KeyError):
+            del cache[3]
+
+    def test_ttl_lru(self):  # timer never ticks here, so nothing expires; eviction is pure LRU
+        cache = TTLCache(maxsize=2, ttl=0, timer=Timer())
+
+        cache[1] = 1
+        cache[2] = 2
+        cache[3] = 3  # evicts 1, the least recently used
+
+        self.assertEqual(len(cache), 2)
+        self.assertNotIn(1, cache)
+        self.assertEqual(cache[2], 2)
+        self.assertEqual(cache[3], 3)
+
+        cache[2]  # touch 2 so it becomes most recently used
+        cache[4] = 4  # evicts 3
+        self.assertEqual(len(cache), 2)
+        self.assertNotIn(1, cache)
+        self.assertEqual(cache[2], 2)
+        self.assertNotIn(3, cache)
+        self.assertEqual(cache[4], 4)
+
+        cache[5] = 5  # evicts 2: the asserts above read 2 before 4
+        self.assertEqual(len(cache), 2)
+        self.assertNotIn(1, cache)
+        self.assertNotIn(2, cache)
+        self.assertNotIn(3, cache)
+        self.assertEqual(cache[4], 4)
+        self.assertEqual(cache[5], 5)
+
+    def test_ttl_expire(self):  # explicit expire(time) prunes entries whose ttl has passed
+        cache = TTLCache(maxsize=3, ttl=2, timer=Timer())
+        with cache.timer as time:  # the cache's timer is a context manager yielding the current time
+            self.assertEqual(time, cache.timer())
+        self.assertEqual(2, cache.ttl)
+
+        cache[1] = 1  # inserted at time 0
+        cache.timer.tick()
+        cache[2] = 2  # inserted at time 1
+        cache.timer.tick()
+        cache[3] = 3  # inserted at time 2
+        self.assertEqual(2, cache.timer())
+
+        self.assertEqual({1, 2, 3}, set(cache))
+        self.assertEqual(3, len(cache))
+        self.assertEqual(1, cache[1])
+        self.assertEqual(2, cache[2])
+        self.assertEqual(3, cache[3])
+
+        cache.expire()  # at time 2 nothing is older than ttl=2 yet
+        self.assertEqual({1, 2, 3}, set(cache))
+        self.assertEqual(3, len(cache))
+        self.assertEqual(1, cache[1])
+        self.assertEqual(2, cache[2])
+        self.assertEqual(3, cache[3])
+
+        cache.expire(3)  # as of time 3, item 1 (inserted at 0) has expired
+        self.assertEqual({2, 3}, set(cache))
+        self.assertEqual(2, len(cache))
+        self.assertNotIn(1, cache)
+        self.assertEqual(2, cache[2])
+        self.assertEqual(3, cache[3])
+
+        cache.expire(4)  # as of time 4, item 2 has expired
+        self.assertEqual({3}, set(cache))
+        self.assertEqual(1, len(cache))
+        self.assertNotIn(1, cache)
+        self.assertNotIn(2, cache)
+        self.assertEqual(3, cache[3])
+
+        cache.expire(5)  # as of time 5, everything has expired
+        self.assertEqual(set(), set(cache))
+        self.assertEqual(0, len(cache))
+        self.assertNotIn(1, cache)
+        self.assertNotIn(2, cache)
+        self.assertNotIn(3, cache)
+
+    def test_ttl_atomic(self):  # operations stay consistent even when every timer reading advances the clock
+        cache = TTLCache(maxsize=1, ttl=1, timer=Timer(auto=True))
+        cache[1] = 1
+        self.assertEqual(1, cache[1])
+        cache[1] = 1
+        self.assertEqual(1, cache.get(1))
+        cache[1] = 1
+        self.assertEqual(1, cache.pop(1))
+        cache[1] = 1
+        self.assertEqual(1, cache.setdefault(1))
+        cache[1] = 1
+        cache.clear()
+        self.assertEqual(0, len(cache))
+
+    def test_ttl_tuple_key(self):  # tuple keys expire like any other key
+        cache = TTLCache(maxsize=1, ttl=0, timer=Timer())
+        self.assertEqual(0, cache.ttl)
+
+        cache[(1, 2, 3)] = 42
+        self.assertEqual(42, cache[(1, 2, 3)])
+        cache.timer.tick()  # ttl=0: one tick is enough to expire the entry
+        with self.assertRaises(KeyError):
+            cache[(1, 2, 3)]
+        self.assertNotIn((1, 2, 3), cache)
+
+    def test_ttl_datetime(self):  # ttl may be a timedelta when the timer returns datetimes
+        from datetime import datetime, timedelta
+
+        cache = TTLCache(maxsize=1, ttl=timedelta(days=1), timer=datetime.now)
+
+        cache[1] = 1
+        self.assertEqual(1, len(cache))
+        cache.expire(datetime.now())  # not yet a day old
+        self.assertEqual(1, len(cache))
+        cache.expire(datetime.now() + timedelta(days=1))  # a day later it expires
+        self.assertEqual(0, len(cache))
diff --git a/tests/test_wrapper.py b/tests/test_wrapper.py
new file mode 100644
index 0000000..37af16b
--- /dev/null
+++ b/tests/test_wrapper.py
@@ -0,0 +1,153 @@
+import unittest
+
+import cachetools
+import cachetools.keys
+
+
+class DecoratorTestMixin:  # shared tests for cachetools.cached over a cache supplied by cache()
+    def cache(self, minsize):
+        raise NotImplementedError  # subclasses provide the cache implementation under test
+
+    def func(self, *args, **kwargs):
+        if hasattr(self, "count"):  # returns 0 on first call, then increments per real call
+            self.count += 1
+        else:
+            self.count = 0
+        return self.count
+
+    def test_decorator(self):  # default hashkey: 1 and 1.0 share a cache entry
+        cache = self.cache(2)
+        wrapper = cachetools.cached(cache)(self.func)
+
+        self.assertEqual(len(cache), 0)
+        self.assertEqual(wrapper.__wrapped__, self.func)  # original function is exposed
+
+        self.assertEqual(wrapper(0), 0)
+        self.assertEqual(len(cache), 1)
+        self.assertIn(cachetools.keys.hashkey(0), cache)
+        self.assertNotIn(cachetools.keys.hashkey(1), cache)
+        self.assertNotIn(cachetools.keys.hashkey(1.0), cache)
+
+        self.assertEqual(wrapper(1), 1)
+        self.assertEqual(len(cache), 2)
+        self.assertIn(cachetools.keys.hashkey(0), cache)
+        self.assertIn(cachetools.keys.hashkey(1), cache)
+        self.assertIn(cachetools.keys.hashkey(1.0), cache)  # hashkey(1) == hashkey(1.0)
+
+        self.assertEqual(wrapper(1), 1)  # cache hit
+        self.assertEqual(len(cache), 2)
+
+        self.assertEqual(wrapper(1.0), 1)  # hit via the equal float key
+        self.assertEqual(len(cache), 2)
+
+        self.assertEqual(wrapper(1.0), 1)
+        self.assertEqual(len(cache), 2)
+
+    def test_decorator_typed(self):  # typedkey: int and float arguments cache separately
+        cache = self.cache(3)
+        key = cachetools.keys.typedkey
+        wrapper = cachetools.cached(cache, key=key)(self.func)
+
+        self.assertEqual(len(cache), 0)
+        self.assertEqual(wrapper.__wrapped__, self.func)
+
+        self.assertEqual(wrapper(0), 0)
+        self.assertEqual(len(cache), 1)
+        self.assertIn(cachetools.keys.typedkey(0), cache)
+        self.assertNotIn(cachetools.keys.typedkey(1), cache)
+        self.assertNotIn(cachetools.keys.typedkey(1.0), cache)
+
+        self.assertEqual(wrapper(1), 1)
+        self.assertEqual(len(cache), 2)
+        self.assertIn(cachetools.keys.typedkey(0), cache)
+        self.assertIn(cachetools.keys.typedkey(1), cache)
+        self.assertNotIn(cachetools.keys.typedkey(1.0), cache)  # float key still absent
+
+        self.assertEqual(wrapper(1), 1)
+        self.assertEqual(len(cache), 2)
+
+        self.assertEqual(wrapper(1.0), 2)  # miss: float 1.0 gets its own entry
+        self.assertEqual(len(cache), 3)
+        self.assertIn(cachetools.keys.typedkey(0), cache)
+        self.assertIn(cachetools.keys.typedkey(1), cache)
+        self.assertIn(cachetools.keys.typedkey(1.0), cache)
+
+        self.assertEqual(wrapper(1.0), 2)
+        self.assertEqual(len(cache), 3)
+
+    def test_decorator_lock(self):  # observed counts imply two acquisitions per miss, one per hit
+        class Lock:
+
+            count = 0  # class-level counter of __enter__ calls
+
+            def __enter__(self):
+                Lock.count += 1
+
+            def __exit__(self, *exc):
+                pass
+
+        cache = self.cache(2)
+        wrapper = cachetools.cached(cache, lock=Lock())(self.func)
+
+        self.assertEqual(len(cache), 0)
+        self.assertEqual(wrapper.__wrapped__, self.func)
+        self.assertEqual(wrapper(0), 0)
+        self.assertEqual(Lock.count, 2)  # miss: two acquisitions
+        self.assertEqual(wrapper(1), 1)
+        self.assertEqual(Lock.count, 4)
+        self.assertEqual(wrapper(1), 1)
+        self.assertEqual(Lock.count, 5)  # hit: single acquisition
+
+
+class CacheWrapperTest(unittest.TestCase, DecoratorTestMixin):
+    def cache(self, minsize):
+        return cachetools.Cache(maxsize=minsize)  # basic Cache under the shared mixin tests
+
+    def test_zero_size_cache_decorator(self):  # maxsize=0: wrapper still works, stores nothing
+        cache = self.cache(0)
+        wrapper = cachetools.cached(cache)(self.func)
+
+        self.assertEqual(len(cache), 0)
+        self.assertEqual(wrapper.__wrapped__, self.func)
+
+        self.assertEqual(wrapper(0), 0)
+        self.assertEqual(len(cache), 0)  # value was computed but never cached
+
+    def test_zero_size_cache_decorator_lock(self):
+        class Lock:
+
+            count = 0  # class-level counter of __enter__ calls
+
+            def __enter__(self):
+                Lock.count += 1
+
+            def __exit__(self, *exc):
+                pass
+
+        cache = self.cache(0)
+        wrapper = cachetools.cached(cache, lock=Lock())(self.func)
+
+        self.assertEqual(len(cache), 0)
+        self.assertEqual(wrapper.__wrapped__, self.func)
+
+        self.assertEqual(wrapper(0), 0)
+        self.assertEqual(len(cache), 0)
+        self.assertEqual(Lock.count, 2)  # lock still taken twice even though nothing is stored
+
+
+class DictWrapperTest(unittest.TestCase, DecoratorTestMixin):
+    def cache(self, minsize):
+        return dict()  # a plain dict serves as an unbounded cache; minsize is ignored
+
+
+class NoneWrapperTest(unittest.TestCase):
+    def func(self, *args, **kwargs):
+        return args + tuple(kwargs.items())  # echoes its arguments so each call is observable
+
+    def test_decorator(self):  # cache=None: caching disabled, function runs every time
+        wrapper = cachetools.cached(None)(self.func)
+        self.assertEqual(wrapper.__wrapped__, self.func)
+
+        self.assertEqual(wrapper(0), (0,))
+        self.assertEqual(wrapper(1), (1,))
+        self.assertEqual(wrapper(1, foo="bar"), (1, ("foo", "bar")))
diff --git a/tox.ini b/tox.ini
new file mode 100644
index 0000000..b36d00c
--- /dev/null
+++ b/tox.ini
@@ -0,0 +1,39 @@
+[tox]
+envlist = check-manifest,docs,doctest,flake8,py
+
+[testenv]
+deps =
+ pytest
+ pytest-cov
+commands =
+ py.test --basetemp={envtmpdir} --cov=cachetools {posargs}
+
+[testenv:check-manifest]
+deps =
+ check-manifest==0.44; python_version < "3.8"
+ check-manifest; python_version >= "3.8"
+commands =
+ check-manifest
+skip_install = true
+
+[testenv:docs]
+deps =
+ sphinx
+commands =
+ sphinx-build -W -b html -d {envtmpdir}/doctrees docs {envtmpdir}/html
+
+[testenv:doctest]
+deps =
+ sphinx
+commands =
+ sphinx-build -W -b doctest -d {envtmpdir}/doctrees docs {envtmpdir}/doctest
+
+[testenv:flake8]
+deps =
+ flake8
+ flake8-black; implementation_name == "cpython"
+ flake8-bugbear
+ flake8-import-order
+commands =
+ flake8
+skip_install = true