From fa0e60627c2f27f2dbd84f66c5739a9f3bf16d47 Mon Sep 17 00:00:00 2001 From: Thomas Kemmer Date: Sat, 22 Mar 2014 11:09:33 +0100 Subject: Initial commit. --- .gitignore | 36 ++++++++ Changes | 3 + LICENSE | 20 +++++ MANIFEST.in | 6 ++ README.rst | 34 ++++++++ cachetools.py | 54 ++++++++++++ docs/.gitignore | 1 + docs/Makefile | 153 ++++++++++++++++++++++++++++++++++ docs/conf.py | 245 ++++++++++++++++++++++++++++++++++++++++++++++++++++++ docs/index.rst | 23 +++++ setup.cfg | 10 +++ setup.py | 35 ++++++++ tests/__init__.py | 0 13 files changed, 620 insertions(+) create mode 100644 .gitignore create mode 100644 Changes create mode 100644 LICENSE create mode 100644 MANIFEST.in create mode 100644 README.rst create mode 100644 cachetools.py create mode 100644 docs/.gitignore create mode 100644 docs/Makefile create mode 100644 docs/conf.py create mode 100644 docs/index.rst create mode 100644 setup.cfg create mode 100644 setup.py create mode 100644 tests/__init__.py diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..ded6067 --- /dev/null +++ b/.gitignore @@ -0,0 +1,36 @@ +*.py[cod] + +# C extensions +*.so + +# Packages +*.egg +*.egg-info +dist +build +eggs +parts +bin +var +sdist +develop-eggs +.installed.cfg +lib +lib64 +__pycache__ + +# Installer logs +pip-log.txt + +# Unit test / coverage reports +.coverage +.tox +nosetests.xml + +# Translations +*.mo + +# Mr Developer +.mr.developer.cfg +.project +.pydevproject diff --git a/Changes b/Changes new file mode 100644 index 0000000..8da350c --- /dev/null +++ b/Changes @@ -0,0 +1,3 @@ +0.0.1 Development + +* Initial alpha release. 
diff --git a/LICENSE b/LICENSE new file mode 100644 index 0000000..380c344 --- /dev/null +++ b/LICENSE @@ -0,0 +1,20 @@ +The MIT License (MIT) + +Copyright (c) 2014 Thomas Kemmer + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/MANIFEST.in b/MANIFEST.in new file mode 100644 index 0000000..c9346d9 --- /dev/null +++ b/MANIFEST.in @@ -0,0 +1,6 @@ +include Changes +include LICENSE +include MANIFEST.in +include README.rst + +recursive-include tests *.py diff --git a/README.rst b/README.rst new file mode 100644 index 0000000..df10ac4 --- /dev/null +++ b/README.rst @@ -0,0 +1,34 @@ +cachetools +======================================================================== + +This module... + +.. 
code-block:: pycon + + >>> from cachetools import LRUCache, LFUCache + + +Installation +------------------------------------------------------------------------ + +Install cachetools using pip:: + + pip install cachetools + + +Project Resources +------------------------------------------------------------------------ + +- `Documentation `_ +- `Issue Tracker `_ +- `Source Code `_ +- `Change Log `_ + + +.. image:: https://pypip.in/v/cachetools/badge.png + :target: https://pypi.python.org/pypi/cachetools/ + :alt: Latest PyPI version + +.. image:: https://pypip.in/d/cachetools/badge.png + :target: https://pypi.python.org/pypi/cachetools/ + :alt: Number of PyPI downloads diff --git a/cachetools.py b/cachetools.py new file mode 100644 index 0000000..bd8b4a2 --- /dev/null +++ b/cachetools.py @@ -0,0 +1,54 @@ +"""TODO""" +import collections + +__version__ = '0.0.1' + + +class LRUCache(collections.MutableMapping): + + def __init__(self, maxsize): + pass + + def __getitem__(self, key): + pass + + def __setitem__(self, key, value): + pass + + def __delitem__(self, key): + pass + + def __iter__(self): + pass + + def __len(self): + pass + + +class LFUCache(collections.MutableMapping): + + def __init__(self, maxsize): + pass + + def __getitem__(self, key): + pass + + def __setitem__(self, key, value): + pass + + def __delitem__(self, key): + pass + + def __iter__(self): + pass + + def __len(self): + pass + + +def lru_cache(maxsize=128, typed=False): + pass + + +def lfu_cache(maxsize=128, typed=False): + pass diff --git a/docs/.gitignore b/docs/.gitignore new file mode 100644 index 0000000..e35d885 --- /dev/null +++ b/docs/.gitignore @@ -0,0 +1 @@ +_build diff --git a/docs/Makefile b/docs/Makefile new file mode 100644 index 0000000..c88ce22 --- /dev/null +++ b/docs/Makefile @@ -0,0 +1,153 @@ +# Makefile for Sphinx documentation +# + +# You can set these variables from the command line. +SPHINXOPTS = +SPHINXBUILD = sphinx-build +PAPER = +BUILDDIR = _build + +# Internal variables. 
+PAPEROPT_a4 = -D latex_paper_size=a4 +PAPEROPT_letter = -D latex_paper_size=letter +ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . +# the i18n builder cannot share the environment and doctrees with the others +I18NSPHINXOPTS = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . + +.PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest gettext + +help: + @echo "Please use \`make ' where is one of" + @echo " html to make standalone HTML files" + @echo " dirhtml to make HTML files named index.html in directories" + @echo " singlehtml to make a single large HTML file" + @echo " pickle to make pickle files" + @echo " json to make JSON files" + @echo " htmlhelp to make HTML files and a HTML help project" + @echo " qthelp to make HTML files and a qthelp project" + @echo " devhelp to make HTML files and a Devhelp project" + @echo " epub to make an epub" + @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter" + @echo " latexpdf to make LaTeX files and run them through pdflatex" + @echo " text to make text files" + @echo " man to make manual pages" + @echo " texinfo to make Texinfo files" + @echo " info to make Texinfo files and run them through makeinfo" + @echo " gettext to make PO message catalogs" + @echo " changes to make an overview of all changed/added/deprecated items" + @echo " linkcheck to check all external links for integrity" + @echo " doctest to run all doctests embedded in the documentation (if enabled)" + +clean: + -rm -rf $(BUILDDIR)/* + +html: + $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html + @echo + @echo "Build finished. The HTML pages are in $(BUILDDIR)/html." + +dirhtml: + $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml + @echo + @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml." + +singlehtml: + $(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml + @echo + @echo "Build finished. 
The HTML page is in $(BUILDDIR)/singlehtml." + +pickle: + $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle + @echo + @echo "Build finished; now you can process the pickle files." + +json: + $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json + @echo + @echo "Build finished; now you can process the JSON files." + +htmlhelp: + $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp + @echo + @echo "Build finished; now you can run HTML Help Workshop with the" \ + ".hhp project file in $(BUILDDIR)/htmlhelp." + +qthelp: + $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp + @echo + @echo "Build finished; now you can run "qcollectiongenerator" with the" \ + ".qhcp project file in $(BUILDDIR)/qthelp, like this:" + @echo "# qcollectiongenerator $(BUILDDIR)/qthelp/cachetools.qhcp" + @echo "To view the help file:" + @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/cachetools.qhc" + +devhelp: + $(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp + @echo + @echo "Build finished." + @echo "To view the help file:" + @echo "# mkdir -p $$HOME/.local/share/devhelp/cachetools" + @echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/cachetools" + @echo "# devhelp" + +epub: + $(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub + @echo + @echo "Build finished. The epub file is in $(BUILDDIR)/epub." + +latex: + $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex + @echo + @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex." + @echo "Run \`make' in that directory to run these through (pdf)latex" \ + "(use \`make latexpdf' here to do that automatically)." + +latexpdf: + $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex + @echo "Running LaTeX files through pdflatex..." + $(MAKE) -C $(BUILDDIR)/latex all-pdf + @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." + +text: + $(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text + @echo + @echo "Build finished. 
The text files are in $(BUILDDIR)/text." + +man: + $(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man + @echo + @echo "Build finished. The manual pages are in $(BUILDDIR)/man." + +texinfo: + $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo + @echo + @echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo." + @echo "Run \`make' in that directory to run these through makeinfo" \ + "(use \`make info' here to do that automatically)." + +info: + $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo + @echo "Running Texinfo files through makeinfo..." + make -C $(BUILDDIR)/texinfo info + @echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo." + +gettext: + $(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale + @echo + @echo "Build finished. The message catalogs are in $(BUILDDIR)/locale." + +changes: + $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes + @echo + @echo "The overview file is in $(BUILDDIR)/changes." + +linkcheck: + $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck + @echo + @echo "Link check complete; look for any errors in the above output " \ + "or in $(BUILDDIR)/linkcheck/output.txt." + +doctest: + $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest + @echo "Testing of doctests in the sources finished, look at the " \ + "results in $(BUILDDIR)/doctest/output.txt." diff --git a/docs/conf.py b/docs/conf.py new file mode 100644 index 0000000..bcfbeed --- /dev/null +++ b/docs/conf.py @@ -0,0 +1,245 @@ +# -*- coding: utf-8 -*- +# +# cachetools documentation build configuration file, created by +# sphinx-quickstart on Mon Feb 10 09:15:34 2014. +# +# This file is execfile()d with the current directory set to its containing dir. +# +# Note that not all possible configuration values are present in this +# autogenerated file. +# +# All configuration values have a default; values that are commented out +# serve to show the default. 
+ +import sys +import os + +# If extensions (or modules to document with autodoc) are in another directory, +# add these directories to sys.path here. If the directory is relative to the +# documentation root, use os.path.abspath to make it absolute, like shown here. +#sys.path.insert(0, os.path.abspath('.')) +sys.path.insert(0, os.path.abspath('..')) +from cachetools import __version__ + +# -- General configuration ----------------------------------------------------- + +# If your documentation needs a minimal Sphinx version, state it here. +#needs_sphinx = '1.0' + +# Add any Sphinx extension module names here, as strings. They can be extensions +# coming with Sphinx (named 'sphinx.ext.*') or your custom ones. +extensions = ['sphinx.ext.autodoc', 'sphinx.ext.doctest', 'sphinx.ext.todo', 'sphinx.ext.coverage'] + +# Add any paths that contain templates here, relative to this directory. +templates_path = ['_templates'] + +# The suffix of source filenames. +source_suffix = '.rst' + +# The encoding of source files. +#source_encoding = 'utf-8-sig' + +# The master toctree document. +master_doc = 'index' + +# General information about the project. +project = u'cachetools' +copyright = u'2014, Thomas Kemmer' + +# The version info for the project you're documenting, acts as replacement for +# |version| and |release|, also used in various other places throughout the +# built documents. +# +# The short X.Y version. +version = __version__ +# The full version, including alpha/beta/rc tags. +release = version + +# The language for content autogenerated by Sphinx. Refer to documentation +# for a list of supported languages. +#language = None + +# There are two options for replacing |today|: either, you set today to some +# non-false value, then it is used: +#today = '' +# Else, today_fmt is used as the format for a strftime call. 
+#today_fmt = '%B %d, %Y' + +# List of patterns, relative to source directory, that match files and +# directories to ignore when looking for source files. +exclude_patterns = ['_build'] + +# The reST default role (used for this markup: `text`) to use for all documents. +#default_role = None + +# If true, '()' will be appended to :func: etc. cross-reference text. +#add_function_parentheses = True + +# If true, the current module name will be prepended to all description +# unit titles (such as .. function::). +#add_module_names = True + +# If true, sectionauthor and moduleauthor directives will be shown in the +# output. They are ignored by default. +#show_authors = False + +# The name of the Pygments (syntax highlighting) style to use. +pygments_style = 'sphinx' + +# A list of ignored prefixes for module index sorting. +#modindex_common_prefix = [] + + +# -- Options for HTML output --------------------------------------------------- + +# The theme to use for HTML and HTML Help pages. See the documentation for +# a list of builtin themes. +html_theme = 'default' + +# Theme options are theme-specific and customize the look and feel of a theme +# further. For a list of options available for each theme, see the +# documentation. +#html_theme_options = {} + +# Add any paths that contain custom themes here, relative to this directory. +#html_theme_path = [] + +# The name for this set of Sphinx documents. If None, it defaults to +# " v documentation". +#html_title = None + +# A shorter title for the navigation bar. Default is the same as html_title. +#html_short_title = None + +# The name of an image file (relative to this directory) to place at the top +# of the sidebar. +#html_logo = None + +# The name of an image file (within the static path) to use as favicon of the +# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 +# pixels large. 
+#html_favicon = None + +# Add any paths that contain custom static files (such as style sheets) here, +# relative to this directory. They are copied after the builtin static files, +# so a file named "default.css" will overwrite the builtin "default.css". +#html_static_path = ['_static'] + +# If not '', a 'Last updated on:' timestamp is inserted at every page bottom, +# using the given strftime format. +#html_last_updated_fmt = '%b %d, %Y' + +# If true, SmartyPants will be used to convert quotes and dashes to +# typographically correct entities. +#html_use_smartypants = True + +# Custom sidebar templates, maps document names to template names. +#html_sidebars = {} + +# Additional templates that should be rendered to pages, maps page names to +# template names. +#html_additional_pages = {} + +# If false, no module index is generated. +#html_domain_indices = True + +# If false, no index is generated. +#html_use_index = True + +# If true, the index is split into individual pages for each letter. +#html_split_index = False + +# If true, links to the reST sources are added to the pages. +#html_show_sourcelink = True + +# If true, "Created using Sphinx" is shown in the HTML footer. Default is True. +#html_show_sphinx = True + +# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. +#html_show_copyright = True + +# If true, an OpenSearch description file will be output, and all pages will +# contain a tag referring to it. The value of this option must be the +# base URL from which the finished HTML is served. +#html_use_opensearch = '' + +# This is the file name suffix for HTML files (e.g. ".xhtml"). +#html_file_suffix = None + +# Output file base name for HTML help builder. +htmlhelp_basename = 'cachetoolsdoc' + + +# -- Options for LaTeX output -------------------------------------------------- + +latex_elements = { + # The paper size ('letterpaper' or 'a4paper'). + #'papersize': 'letterpaper', + + # The font size ('10pt', '11pt' or '12pt'). 
+ #'pointsize': '10pt', + + # Additional stuff for the LaTeX preamble. + #'preamble': '', +} + +# Grouping the document tree into LaTeX files. List of tuples +# (source start file, target name, title, author, documentclass [howto/manual]). +latex_documents = [ + ('index', 'cachetools.tex', u'cachetools Documentation', + u'Thomas Kemmer', 'manual'), +] + +# The name of an image file (relative to this directory) to place at the top of +# the title page. +#latex_logo = None + +# For "manual" documents, if this is true, then toplevel headings are parts, +# not chapters. +#latex_use_parts = False + +# If true, show page references after internal links. +#latex_show_pagerefs = False + +# If true, show URL addresses after external links. +#latex_show_urls = False + +# Documents to append as an appendix to all manuals. +#latex_appendices = [] + +# If false, no module index is generated. +#latex_domain_indices = True + + +# -- Options for manual page output -------------------------------------------- + +# One entry per manual page. List of tuples +# (source start file, name, description, authors, manual section). +man_pages = [ + ('index', 'cachetools', u'cachetools Documentation', + [u'Thomas Kemmer'], 1) +] + +# If true, show URL addresses after external links. +#man_show_urls = False + + +# -- Options for Texinfo output ------------------------------------------------ + +# Grouping the document tree into Texinfo files. List of tuples +# (source start file, target name, title, author, +# dir menu entry, description, category) +texinfo_documents = [ + ('index', 'cachetools', u'cachetools Documentation', + u'Thomas Kemmer', 'cachetools', 'One line description of project.', + 'Miscellaneous'), +] + +# Documents to append as an appendix to all manuals. +#texinfo_appendices = [] + +# If false, no module index is generated. +#texinfo_domain_indices = True + +# How to display URL addresses: 'footnote', 'no', or 'inline'. 
+#texinfo_show_urls = 'footnote' diff --git a/docs/index.rst b/docs/index.rst new file mode 100644 index 0000000..caa89a8 --- /dev/null +++ b/docs/index.rst @@ -0,0 +1,23 @@ +:mod:`cachetools` --- TODO +======================================================================= + +.. module:: cachetools + +This module... + + +.. code-block:: pycon + + >>> from cachetools import LRUCache, LFUCache + + +.. autoclass:: LRUCache + :members: + +.. autoclass:: LFUCache + :members: + + +.. autofunction:: lru_cache + +.. autofunction:: lfu_cache diff --git a/setup.cfg b/setup.cfg new file mode 100644 index 0000000..53beacd --- /dev/null +++ b/setup.cfg @@ -0,0 +1,10 @@ +[build_sphinx] +source-dir = docs/ +build-dir = docs/_build +all_files = 1 + +[upload_sphinx] +upload-dir = docs/_build/html + +[flake8] +exclude = docs/* diff --git a/setup.py b/setup.py new file mode 100644 index 0000000..a0fd05f --- /dev/null +++ b/setup.py @@ -0,0 +1,35 @@ +from setuptools import setup + + +def get_version(filename): + import re + content = open(filename).read() + metadata = dict(re.findall("__([a-z]+)__ = '([^']+)'", content)) + return metadata['version'] + +setup( + name='cachetools', + version=get_version('cachetools.py'), + author='Thomas Kemmer', + author_email='tkemmer@computer.org', + url='https://github.com/tkem/cachetools', + license='MIT', + description='TODO', # noqa + long_description=open('README.rst').read(), + keywords='cache caching lru lfu ttl', + classifiers=[ + 'Development Status :: 3 - Alpha', + 'Environment :: Other Environment', + 'Intended Audience :: Developers', + 'License :: OSI Approved :: MIT License', + 'Operating System :: OS Independent', + 'Programming Language :: Python', + 'Programming Language :: Python :: 2', + 'Programming Language :: Python :: 2.7', + 'Topic :: Internet', + 'Topic :: Software Development :: Libraries :: Python Modules' + ], + py_modules=['cachetools'], + test_suite='nose.collector', + tests_require=['nose'] +) diff --git 
a/tests/__init__.py b/tests/__init__.py new file mode 100644 index 0000000..e69de29 -- cgit v1.2.3 From c2e0e6be6252ac1fe568e9107a613c903f37f579 Mon Sep 17 00:00:00 2001 From: Thomas Kemmer Date: Sat, 22 Mar 2014 11:16:12 +0100 Subject: Add description. --- cachetools.py | 2 +- docs/index.rst | 2 +- setup.py | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/cachetools.py b/cachetools.py index bd8b4a2..d21487b 100644 --- a/cachetools.py +++ b/cachetools.py @@ -1,4 +1,4 @@ -"""TODO""" +"""Python 2.7 memoizing collections and decorators""" import collections __version__ = '0.0.1' diff --git a/docs/index.rst b/docs/index.rst index caa89a8..1cc052f 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -1,4 +1,4 @@ -:mod:`cachetools` --- TODO +:mod:`cachetools` --- Python 2.7 memoizing collections and decorators ======================================================================= .. module:: cachetools diff --git a/setup.py b/setup.py index a0fd05f..d7bae66 100644 --- a/setup.py +++ b/setup.py @@ -14,7 +14,7 @@ setup( author_email='tkemmer@computer.org', url='https://github.com/tkem/cachetools', license='MIT', - description='TODO', # noqa + description='Python 2.7 memoizing collections and decorators', # noqa long_description=open('README.rst').read(), keywords='cache caching lru lfu ttl', classifiers=[ -- cgit v1.2.3 From a34b1fa18a3494b6501dc5eaf8114e507fe88cc9 Mon Sep 17 00:00:00 2001 From: Thomas Kemmer Date: Sat, 22 Mar 2014 12:57:39 +0100 Subject: Add initial implementation, unit tests --- README.rst | 45 +++++++++++++--- cachetools.py | 141 +++++++++++++++++++++++++++++++++++++++++-------- docs/index.rst | 20 ++++++- setup.py | 2 +- tests/test_lfucache.py | 31 +++++++++++ tests/test_lrucache.py | 37 +++++++++++++ 6 files changed, 245 insertions(+), 31 deletions(-) create mode 100644 tests/test_lfucache.py create mode 100644 tests/test_lrucache.py diff --git a/README.rst b/README.rst index df10ac4..afe3375 100644 --- a/README.rst +++ 
b/README.rst @@ -1,11 +1,26 @@ cachetools ======================================================================== -This module... +This module provides various memoizing collections and function +decorators, including a variant of the Python 3 functools.lru_cache_ +decorator. + +.. note:: + + This module is in early pre-alpha, and not fit for *any* purpose + (yet). .. code-block:: pycon - >>> from cachetools import LRUCache, LFUCache + >>> from cachetools import LRUCache + >>> cache = LRUCache(maxsize=16) + >>> cache['test'] = 1 + >>> cache.info() + CacheInfo(hits=0, misses=0, maxsize=16, currsize=1) + >>> cache['test'] + 1 + >>> cache.info() + CacheInfo(hits=1, misses=0, maxsize=16, currsize=1) Installation @@ -19,11 +34,10 @@ Install cachetools using pip:: Project Resources ------------------------------------------------------------------------ -- `Documentation `_ -- `Issue Tracker `_ -- `Source Code `_ -- `Change Log `_ - +- `Documentation`_ +- `Issue Tracker`_ +- `Source Code`_ +- `Change Log`_ .. image:: https://pypip.in/v/cachetools/badge.png :target: https://pypi.python.org/pypi/cachetools/ @@ -32,3 +46,20 @@ Project Resources .. image:: https://pypip.in/d/cachetools/badge.png :target: https://pypi.python.org/pypi/cachetools/ :alt: Number of PyPI downloads + + +License +------------------------------------------------------------------------ + +cachetools is Copyright 2014 Thomas Kemmer. + +Licensed under the `MIT License`_. + + +.. _functools.lru_cache: http://docs.python.org/3.4/library/functools.html#functools.lru_cache + +.. _Documentation: http://pythonhosted.org/cachetools/ +.. _Source Code: https://github.com/tkem/cachetools/ +.. _Issue Tracker: https://github.com/tkem/cachetools/issues/ +.. _Change Log: https://raw.github.com/tkem/cachetools/master/Changes +.. 
_MIT License: http://opensource.org/licenses/MIT diff --git a/cachetools.py b/cachetools.py index d21487b..72229a4 100644 --- a/cachetools.py +++ b/cachetools.py @@ -1,54 +1,153 @@ """Python 2.7 memoizing collections and decorators""" import collections +import functools +import threading -__version__ = '0.0.1' +__version__ = '0.0.0' + + +CacheInfo = collections.namedtuple('CacheInfo', 'hits misses maxsize currsize') class LRUCache(collections.MutableMapping): - def __init__(self, maxsize): - pass + def __init__(self, maxsize, lock=threading.RLock): + self.__maxsize = maxsize + self.__lock = lock() + self.__cache = collections.OrderedDict() + self.__hits = 0 + self.__misses = 0 def __getitem__(self, key): - pass + with self.__lock: + try: + value = self.__cache[key] + except KeyError: + self.__misses += 1 + raise + self.__hits += 1 + self._update(key, value) + return value def __setitem__(self, key, value): - pass + with self.__lock: + if len(self.__cache) >= self.__maxsize: + # FIXME: popitem + del self.__cache[next(iter(self.__cache))] + self.__cache[key] = value + self._update(key, value) def __delitem__(self, key): - pass + with self._lock: + del self.__cache[key] def __iter__(self): - pass + return iter(self.__cache) + + def __len__(self): + return len(self.__cache) - def __len(self): - pass + def _update(self, key, value): + del self.__cache[key] + self.__cache[key] = value + + def info(self): + return CacheInfo(self.__hits, self.__misses, self.__maxsize, len(self)) class LFUCache(collections.MutableMapping): - def __init__(self, maxsize): - pass + def __init__(self, maxsize, lock=threading.RLock): + self.__maxsize = maxsize + self.__lock = lock() + self.__cache = {} + self.__count = collections.Counter() + self.__hits = 0 + self.__misses = 0 def __getitem__(self, key): - pass + with self.__lock: + value = self.__cache[key] + self.__count[key] += 1 + return value def __setitem__(self, key, value): - pass + with self.__lock: + if len(self.__cache) >= 
self.__maxsize: + key, _ = self.__count.most_common()[-1] + del self.__cache[key] + del self.__count[key] + self.__cache[key] = value + self.__count[key] = 0 def __delitem__(self, key): - pass + del self.__cache[key] + del self.__count[key] def __iter__(self): - pass + return iter(self.__cache) + + def __len__(self): + return len(self.__cache) + + def info(self): + return CacheInfo(self.__hits, self.__misses, self.__maxsize, len(self)) + + +def makekey(args, kwargs, typed=False): + # TODO: support typed argument keys + return (tuple(sorted(kwargs.items()))) + args + + +def lru_cache(maxsize=128, typed=False, key=makekey): + def decorator(func): + cache = LRUCache(maxsize) + + @functools.wraps(func) + def wrapper(*args, **kwargs): + key = makekey(args, kwargs, typed) + try: + return cache[key] + except KeyError: + result = func(*args, **kwargs) + cache[key] = result + return result + + def cache_info(): + return cache.info() + + def cache_clear(): + cache.clear() + + wrapper.cache_info = cache_info + wrapper.cache_clear = cache_clear + return wrapper + + return decorator + + +def lfu_cache(maxsize=128, typed=False, key=makekey): + def decorator(func): + cache = LRUCache(maxsize) - def __len(self): - pass + @functools.wraps(func) + def wrapper(*args, **kwargs): + key = makekey(args, kwargs, typed) + try: + return cache[key] + except KeyError: + result = func(*args, **kwargs) + cache[key] = result + return result + def cache_info(): + return cache.info() -def lru_cache(maxsize=128, typed=False): - pass + def cache_clear(): + cache.clear() + wrapper.cache_info = cache_info + wrapper.cache_clear = cache_clear + return wrapper -def lfu_cache(maxsize=128, typed=False): - pass + return decorator diff --git a/docs/index.rst b/docs/index.rst index 1cc052f..38c0c0d 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -3,12 +3,26 @@ .. module:: cachetools -This module... 
+This module provides various memoizing collections and function +decorators, including a variant of the Python 3 functools.lru_cache_ +decorator. +.. note:: + + This module is in early pre-alpha, and not fit for *any* purpose + (yet). .. code-block:: pycon - >>> from cachetools import LRUCache, LFUCache + >>> from cachetools import LRUCache + >>> cache = LRUCache(maxsize=16) + >>> cache['test'] = 1 + >>> cache.info() + CacheInfo(hits=0, misses=0, maxsize=16, currsize=1) + >>> cache['test'] + 1 + >>> cache.info() + CacheInfo(hits=1, misses=0, maxsize=16, currsize=1) .. autoclass:: LRUCache @@ -21,3 +35,5 @@ This module... .. autofunction:: lru_cache .. autofunction:: lfu_cache + +.. _functools.lru_cache: http://docs.python.org/3.4/library/functools.html#functools.lru_cache diff --git a/setup.py b/setup.py index d7bae66..e165d15 100644 --- a/setup.py +++ b/setup.py @@ -18,7 +18,7 @@ setup( long_description=open('README.rst').read(), keywords='cache caching lru lfu ttl', classifiers=[ - 'Development Status :: 3 - Alpha', + 'Development Status :: 2 - Pre-Alpha', 'Environment :: Other Environment', 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', diff --git a/tests/test_lfucache.py b/tests/test_lfucache.py new file mode 100644 index 0000000..f9e4e01 --- /dev/null +++ b/tests/test_lfucache.py @@ -0,0 +1,31 @@ +import unittest + +from cachetools import LFUCache, lfu_cache + + +@lfu_cache(maxsize=2) +def cached(n): + return n + + +class LFUCacheTest(unittest.TestCase): + + def test_insert(self): + cache = LFUCache(maxsize=2) + cache['a'] = 0 + cache['a'] = 1 + cache['b'] = 2 + cache['c'] = 3 + + self.assertEqual(cache['a'], 1) + self.assertTrue('b' in cache or 'c' in cache) + self.assertTrue('b' not in cache or 'c' not in cache) + + cache['a'] = 4 + self.assertEqual(cache['a'], 4) + + def test_decorator(self): + self.assertEqual(cached(1), 1) + self.assertItemsEqual(cached.cache_info(), [0, 1, 2, 1]) + self.assertEqual(cached(1), 1) + 
self.assertItemsEqual(cached.cache_info(), [1, 1, 2, 1]) diff --git a/tests/test_lrucache.py b/tests/test_lrucache.py new file mode 100644 index 0000000..d5eea5d --- /dev/null +++ b/tests/test_lrucache.py @@ -0,0 +1,37 @@ +import unittest + +from cachetools import LRUCache, lru_cache + + +@lru_cache(maxsize=2) +def cached(n): + return n + + +class LRUCacheTest(unittest.TestCase): + + def test_insert(self): + cache = LRUCache(maxsize=2) + cache['a'] = 1 + cache['b'] = 2 + cache['c'] = 3 + + self.assertEqual(cache['b'], 2) + self.assertEqual(cache['c'], 3) + self.assertNotIn('a', cache) + + cache['a'] = 4 + self.assertEqual(cache['a'], 4) + self.assertEqual(cache['c'], 3) + self.assertNotIn('b', cache) + + cache['b'] = 5 + self.assertEqual(cache['b'], 5) + self.assertEqual(cache['c'], 3) + self.assertNotIn('a', cache) + + def test_decorator(self): + self.assertEqual(cached(1), 1) + self.assertItemsEqual(cached.cache_info(), [0, 1, 2, 1]) + self.assertEqual(cached(1), 1) + self.assertItemsEqual(cached.cache_info(), [1, 1, 2, 1]) -- cgit v1.2.3 From 37c58d04f5357140c18d7117702598039451744d Mon Sep 17 00:00:00 2001 From: Thomas Kemmer Date: Sat, 22 Mar 2014 13:03:32 +0100 Subject: Update documentation. --- README.rst | 12 +++++------- docs/index.rst | 6 +++--- 2 files changed, 8 insertions(+), 10 deletions(-) diff --git a/README.rst b/README.rst index afe3375..e310f47 100644 --- a/README.rst +++ b/README.rst @@ -2,13 +2,11 @@ cachetools ======================================================================== This module provides various memoizing collections and function -decorators, including a variant of the Python 3 functools.lru_cache_ -decorator. +decorators, including a variant of the Python 3 Standard Library +lru_cache_ decorator. -.. note:: - - This module is in early pre-alpha, and not fit for *any* purpose - (yet). + Important Note: This module is in early pre-alpha, and not fit for + *any* purpose (yet). .. 
code-block:: pycon @@ -56,7 +54,7 @@ cachetools is Copyright 2014 Thomas Kemmer. Licensed under the `MIT License`_. -.. _functools.lru_cache: http://docs.python.org/3.4/library/functools.html#functools.lru_cache +.. _lru_cache: http://docs.python.org/3.4/library/functools.html#functools.lru_cache .. _Documentation: http://pythonhosted.org/cachetools/ .. _Source Code: https://github.com/tkem/cachetools/ diff --git a/docs/index.rst b/docs/index.rst index 38c0c0d..7c37641 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -4,8 +4,8 @@ .. module:: cachetools This module provides various memoizing collections and function -decorators, including a variant of the Python 3 functools.lru_cache_ -decorator. +decorators, including a variant of the Python 3 Standard Library +lru_cache_ decorator. .. note:: @@ -36,4 +36,4 @@ decorator. .. autofunction:: lfu_cache -.. _functools.lru_cache: http://docs.python.org/3.4/library/functools.html#functools.lru_cache +.. _lru_cache: http://docs.python.org/3/library/functools.html#functools.lru_cache -- cgit v1.2.3 From 35f85f2f78f842b15609c5973f1fdd261c0a6476 Mon Sep 17 00:00:00 2001 From: Thomas Kemmer Date: Tue, 25 Mar 2014 06:44:31 +0100 Subject: Prepare v0.1.0 --- .coveragerc | 5 + Changes | 4 +- LICENSE | 20 ---- MANIFEST.in | 2 +- MIT-LICENSE | 20 ++++ README.rst | 49 ++++++---- cachetools.py | 252 ++++++++++++++++++++++++++++++------------------- docs/index.rst | 73 ++++++++++---- setup.py | 9 +- tests/test_lfucache.py | 14 ++- tests/test_lrucache.py | 25 +++-- tests/test_rrcache.py | 29 ++++++ 12 files changed, 332 insertions(+), 170 deletions(-) create mode 100644 .coveragerc delete mode 100644 LICENSE create mode 100644 MIT-LICENSE create mode 100644 tests/test_rrcache.py diff --git a/.coveragerc b/.coveragerc new file mode 100644 index 0000000..e77617c --- /dev/null +++ b/.coveragerc @@ -0,0 +1,5 @@ +[report] +omit = + */pyshared/* + */python?.?/* + */site-packages/nose/* diff --git a/Changes b/Changes index 8da350c..67af7e8 
100644 --- a/Changes +++ b/Changes @@ -1,3 +1,3 @@ -0.0.1 Development +0.1.0 2014-03-27 -* Initial alpha release. +* Initial release. diff --git a/LICENSE b/LICENSE deleted file mode 100644 index 380c344..0000000 --- a/LICENSE +++ /dev/null @@ -1,20 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2014 Thomas Kemmer - -Permission is hereby granted, free of charge, to any person obtaining a copy of -this software and associated documentation files (the "Software"), to deal in -the Software without restriction, including without limitation the rights to -use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of -the Software, and to permit persons to whom the Software is furnished to do so, -subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS -FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR -COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
diff --git a/MANIFEST.in b/MANIFEST.in index c9346d9..5217935 100644 --- a/MANIFEST.in +++ b/MANIFEST.in @@ -1,6 +1,6 @@ include Changes -include LICENSE include MANIFEST.in +include MIT-LICENSE include README.rst recursive-include tests *.py diff --git a/MIT-LICENSE b/MIT-LICENSE new file mode 100644 index 0000000..380c344 --- /dev/null +++ b/MIT-LICENSE @@ -0,0 +1,20 @@ +The MIT License (MIT) + +Copyright (c) 2014 Thomas Kemmer + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/README.rst b/README.rst index e310f47..bb1067b 100644 --- a/README.rst +++ b/README.rst @@ -3,22 +3,35 @@ cachetools This module provides various memoizing collections and function decorators, including a variant of the Python 3 Standard Library -lru_cache_ decorator. - - Important Note: This module is in early pre-alpha, and not fit for - *any* purpose (yet). +`functools.lru_cache`_ decorator. .. 
code-block:: pycon - >>> from cachetools import LRUCache - >>> cache = LRUCache(maxsize=16) - >>> cache['test'] = 1 - >>> cache.info() - CacheInfo(hits=0, misses=0, maxsize=16, currsize=1) - >>> cache['test'] - 1 - >>> cache.info() - CacheInfo(hits=1, misses=0, maxsize=16, currsize=1) + >>> from cachetools import LRUCache + >>> cache = LRUCache(maxsize=2) + >>> cache['first'] = 1 + >>> cache['second'] = 2 + >>> cache + LRUCache(OrderedDict([('first', 1), ('second', 2)]), maxsize=2) + >>> cache['third'] = 3 + >>> cache + LRUCache(OrderedDict([('second', 2), ('third', 3)]), maxsize=2) + >>> cache['second'] + 2 + >>> cache + LRUCache(OrderedDict([('third', 3), ('second', 2)]), maxsize=2) + >>> cache['fourth'] = 4 + >>> cache + LRUCache(OrderedDict([('second', 2), ('fourth', 4)]), maxsize=2) + +For the purpose of this module, a *cache* is a mutable mapping_ of +fixed size, defined by its ``maxsize`` attribute. When the cache is +full, i.e. ``len(cache) == cache.maxsize``, the cache must choose +which item(s) to discard based on a suitable `cache algorithm`_. + +This module provides various cache implementations based on different +cache algorithms, as well as decorators for easily memoizing function +calls. Installation @@ -49,15 +62,17 @@ Project Resources License ------------------------------------------------------------------------ -cachetools is Copyright 2014 Thomas Kemmer. +Copyright 2014 Thomas Kemmer. Licensed under the `MIT License`_. -.. _lru_cache: http://docs.python.org/3.4/library/functools.html#functools.lru_cache +.. _functools.lru_cache: http://docs.python.org/3.4/library/functools.html#functools.lru_cache +.. _mapping: http://docs.python.org/dev/glossary.html#term-mapping +.. _cache algorithm: http://en.wikipedia.org/wiki/Cache_algorithms .. _Documentation: http://pythonhosted.org/cachetools/ .. _Source Code: https://github.com/tkem/cachetools/ .. _Issue Tracker: https://github.com/tkem/cachetools/issues/ -.. 
_Change Log: https://raw.github.com/tkem/cachetools/master/Changes -.. _MIT License: http://opensource.org/licenses/MIT +.. _Change Log: http://raw.github.com/tkem/cachetools/master/Changes +.. _MIT License: http://raw.github.com/tkem/cachetools/master/MIT-LICENSE diff --git a/cachetools.py b/cachetools.py index 72229a4..c0dc19b 100644 --- a/cachetools.py +++ b/cachetools.py @@ -1,147 +1,176 @@ -"""Python 2.7 memoizing collections and decorators""" +"""Extensible memoizing collections and decorators""" import collections import functools -import threading +import random -__version__ = '0.0.0' +try: + from threading import RLock +except ImportError: + from dummy_threading import RLock - -CacheInfo = collections.namedtuple('CacheInfo', 'hits misses maxsize currsize') +__version__ = '0.1.0' -class LRUCache(collections.MutableMapping): +class Cache(collections.MutableMapping): - def __init__(self, maxsize, lock=threading.RLock): - self.__maxsize = maxsize - self.__lock = lock() - self.__cache = collections.OrderedDict() - self.__hits = 0 - self.__misses = 0 + def __init__(self, maxsize, wrapped=None): + self.__wrapped__ = {} if wrapped is None else wrapped + self.maxsize = maxsize def __getitem__(self, key): - with self.__lock: - try: - value = self.__cache[key] - except KeyError: - self.__misses += 1 - raise - self.__hits += 1 - self._update(key, value) - return value + return self.__wrapped__[key] def __setitem__(self, key, value): - with self.__lock: - if len(self.__cache) >= self.__maxsize: - # FIXME: popitem - del self.__cache[next(iter(self.__cache))] - self.__cache[key] = value - self._update(key, value) + while len(self) >= self.maxsize: + self.popitem() + self.__wrapped__[key] = value def __delitem__(self, key): - with self._lock: - del self.__cache[key] + del self.__wrapped__[key] def __iter__(self): - return iter(self.__cache) + return iter(self.__wrapped__) def __len__(self): - return len(self.__cache) + return len(self.__wrapped__) + + def __repr__(self): 
+ return '%s(%r, maxsize=%d)' % ( + self.__class__.__name__, + self.__wrapped__, + self.__maxsize, + ) + + @property + def maxsize(self): + return self.__maxsize + + @maxsize.setter + def maxsize(self, value): + if value < 1: + raise ValueError('maxsize must be >= 1') + while (len(self) > value): + self.popitem() + self.__maxsize = value + + +class LRUCache(Cache): + """Least Recently Used (LRU) cache implementation. + + Discards the least recently used items first to make space when + necessary. - def _update(self, key, value): - del self.__cache[key] - self.__cache[key] = value + This implementation uses :class:`collections.OrderedDict` to keep + track of item usage. + """ - def info(self): - return CacheInfo(self.__hits, self.__misses, self.__maxsize, len(self)) + # OrderedDict.move_to_end is only available in Python 3 + if hasattr(collections.OrderedDict, 'move_to_end'): + def __update(self, key): + self.__wrapped__.move_to_end(key) + else: + def __update(self, key): + self.__wrapped__[key] = self.__wrapped__.pop(key) + def __init__(self, maxsize): + Cache.__init__(self, maxsize, collections.OrderedDict()) -class LFUCache(collections.MutableMapping): + def __getitem__(self, key): + value = Cache.__getitem__(self, key) + self.__update(key) + return value + + def popitem(self): + return self.__wrapped__.popitem(False) + + +class LFUCache(Cache): + """Least Frequently Used (LFU) cache implementation. - def __init__(self, maxsize, lock=threading.RLock): - self.__maxsize = maxsize - self.__lock = lock() - self.__cache = {} - self.__count = collections.Counter() - self.__hits = 0 - self.__misses = 0 + Counts how often an item is needed, and discards the items used + least often to make space when necessary. + + This implementation uses :class:`collections.Counter` to keep + track of usage counts. 
+ """ + + def __init__(self, maxsize): + Cache.__init__(self, maxsize) + self.__counter = collections.Counter() def __getitem__(self, key): - with self.__lock: - value = self.__cache[key] - self.__count[key] += 1 - return value + value = Cache.__getitem__(self, key) + self.__counter[key] += 1 + return value def __setitem__(self, key, value): - with self.__lock: - if len(self.__cache) >= self.__maxsize: - key, _ = self.__count.most_common()[-1] - del self.__cache[key] - del self.__count[key] - self.__cache[key] = value - self.__count[key] = 0 + Cache.__setitem__(self, key, value) + self.__counter[key] += 0 def __delitem__(self, key): - del self.__cache[key] - del self.__count[key] + Cache.__delitem__(self, key) + del self.__counter[key] - def __iter__(self): - return iter(self.__cache) + def popitem(self): + item = self.__counter.most_common()[-1] + self.pop(item[0]) + return item - def __len__(self): - return len(self.__cache) - def info(self): - return CacheInfo(self.__hits, self.__misses, self.__maxsize, len(self)) +class RRCache(Cache): + """Random Replacement (RR) cache implementation. + Randomly selects a candidate item and discards it to make space + when necessary. -def makekey(args, kwargs, typed=False): - # TODO: support typed argument keys - return (tuple(sorted(kwargs.items()))) + args + This implementations uses :func:`random.choice` to select the item + to be discarded. 
+ """ + def __init__(self, maxsize): + Cache.__init__(self, maxsize) -def lru_cache(maxsize=128, typed=False, key=makekey): - def decorator(func): - cache = LRUCache(maxsize) + def popitem(self): + item = random.choice(list(self.items())) + self.pop(item[0]) + return item - @functools.wraps(func) - def wrapper(*args, **kwargs): - key = makekey(args, kwargs, typed) - try: - return cache[key] - except KeyError: - result = func(*args, **kwargs) - cache[key] = result - return result - def cache_info(): - return cache.info() +CacheInfo = collections.namedtuple('CacheInfo', 'hits misses maxsize currsize') - def cache_clear(): - cache.clear() - wrapper.cache_info = cache_info - wrapper.cache_clear = cache_clear - return wrapper +def _makekey(args, kwargs): + return (args, tuple(sorted(kwargs.items()))) - return decorator +def _makekey_typed(args, kwargs): + key = _makekey(args, kwargs) + key += tuple(type(v) for v in args) + key += tuple(type(v) for k, v in sorted(kwargs.items())) + return key -def lfu_cache(maxsize=128, typed=False, key=makekey): + +def _cachedfunc(cache, makekey, lock): def decorator(func): - cache = LRUCache(maxsize) + count = [0, 0] @functools.wraps(func) def wrapper(*args, **kwargs): - key = makekey(args, kwargs, typed) - try: - return cache[key] - except KeyError: - result = func(*args, **kwargs) + key = makekey(args, kwargs) + with lock: + try: + result = cache[key] + count[0] += 1 + return result + except KeyError: + count[1] += 1 + result = func(*args, **kwargs) + with lock: cache[key] = result - return result + return result def cache_info(): - return cache.info() + return CacheInfo(count[0], count[1], cache.maxsize, len(cache)) def cache_clear(): cache.clear() @@ -151,3 +180,36 @@ def lfu_cache(maxsize=128, typed=False, key=makekey): return wrapper return decorator + + +def lru_cache(maxsize=128, typed=False, lock=RLock): + """Decorator to wrap a function with a memoizing callable that + saves up to the `maxsize` most recent calls based on a 
Least + Recently Used (LRU) algorithm. + """ + if typed: + return _cachedfunc(LRUCache(maxsize), _makekey_typed, lock()) + else: + return _cachedfunc(LRUCache(maxsize), _makekey, lock()) + + +def lfu_cache(maxsize=128, typed=False, lock=RLock): + """Decorator to wrap a function with a memoizing callable that + saves up to the `maxsize` most recent calls based on a Least + Frequently Used (LFU) algorithm. + """ + if typed: + return _cachedfunc(LFUCache(maxsize), _makekey_typed, lock()) + else: + return _cachedfunc(LFUCache(maxsize), _makekey, lock()) + + +def rr_cache(maxsize=128, typed=False, lock=RLock): + """Decorator to wrap a function with a memoizing callable that + saves up to the `maxsize` most recent calls based on a Random + Replacement (RR) algorithm. + """ + if typed: + return _cachedfunc(RRCache(maxsize), _makekey_typed, lock()) + else: + return _cachedfunc(RRCache(maxsize), _makekey, lock()) diff --git a/docs/index.rst b/docs/index.rst index 7c37641..8b1866d 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -1,29 +1,43 @@ -:mod:`cachetools` --- Python 2.7 memoizing collections and decorators +:mod:`cachetools` --- Extensible memoizing collections and decorators ======================================================================= .. module:: cachetools This module provides various memoizing collections and function decorators, including a variant of the Python 3 Standard Library -lru_cache_ decorator. +:func:`functools.lru_cache` decorator. -.. note:: +.. code-block:: pycon - This module is in early pre-alpha, and not fit for *any* purpose - (yet). 
+ >>> from cachetools import LRUCache + >>> cache = LRUCache(maxsize=2) + >>> cache['first'] = 1 + >>> cache['second'] = 2 + >>> cache + LRUCache(OrderedDict([('first', 1), ('second', 2)]), maxsize=2) + >>> cache['third'] = 3 + >>> cache + LRUCache(OrderedDict([('second', 2), ('third', 3)]), maxsize=2) + >>> cache['second'] + 2 + >>> cache + LRUCache(OrderedDict([('third', 3), ('second', 2)]), maxsize=2) + >>> cache['fourth'] = 4 + >>> cache + LRUCache(OrderedDict([('second', 2), ('fourth', 4)]), maxsize=2) -.. code-block:: pycon +For the purpose of this module, a *cache* is a mutable mapping_ of +fixed size, defined by its :attr:`maxsize` attribute. When the cache +is full, i.e. ``len(cache) == cache.maxsize``, the cache must choose +which item(s) to discard based on a suitable `cache algorithm`_. + +This module provides various cache implementations based on different +cache algorithms, as well as decorators for easily memoizing function +calls. - >>> from cachetools import LRUCache - >>> cache = LRUCache(maxsize=16) - >>> cache['test'] = 1 - >>> cache.info() - CacheInfo(hits=0, misses=0, maxsize=16, currsize=1) - >>> cache['test'] - 1 - >>> cache.info() - CacheInfo(hits=1, misses=0, maxsize=16, currsize=1) +Cache Classes +------------------------------------------------------------------------ .. autoclass:: LRUCache :members: @@ -31,9 +45,36 @@ lru_cache_ decorator. .. autoclass:: LFUCache :members: +.. autoclass:: RRCache + :members: + + +Function Decorators +------------------------------------------------------------------------ + +This module provides several memoizing function decorators compatible +with --- though not necessarily as efficient as --- the Python 3 +Standard Library :func:`functools.lru_cache` decorator. 
+ +All decorators feature two optional arguments, which should be +specified as keyword arguments for compatibility with future +extensions: + +If `typed` is set to :const:`True`, function arguments of different +types will be cached separately. + +`lock` specifies a function of zero arguments that returns a `context +manager`_ to lock the cache when necessary. If not specified, a +:class:`threading.RLock` will be used for synchronizing access from +multiple threads. .. autofunction:: lru_cache .. autofunction:: lfu_cache -.. _lru_cache: http://docs.python.org/3/library/functools.html#functools.lru_cache +.. autofunction:: rr_cache + + +.. _mapping: http://docs.python.org/dev/glossary.html#term-mapping +.. _cache algorithm: http://en.wikipedia.org/wiki/Cache_algorithms +.. _context manager: http://docs.python.org/dev/glossary.html#term-context-manager diff --git a/setup.py b/setup.py index e165d15..f0cfd7b 100644 --- a/setup.py +++ b/setup.py @@ -14,11 +14,11 @@ setup( author_email='tkemmer@computer.org', url='https://github.com/tkem/cachetools', license='MIT', - description='Python 2.7 memoizing collections and decorators', # noqa + description='Extensible memoizing collections and decorators', # noqa long_description=open('README.rst').read(), keywords='cache caching lru lfu ttl', classifiers=[ - 'Development Status :: 2 - Pre-Alpha', + 'Development Status :: 3 - Alpha', 'Environment :: Other Environment', 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', @@ -26,10 +26,11 @@ setup( 'Programming Language :: Python', 'Programming Language :: Python :: 2', 'Programming Language :: Python :: 2.7', + 'Programming Language :: Python :: 3', + 'Programming Language :: Python :: 3.2', 'Topic :: Internet', 'Topic :: Software Development :: Libraries :: Python Modules' ], py_modules=['cachetools'], - test_suite='nose.collector', - tests_require=['nose'] + test_suite='tests' ) diff --git a/tests/test_lfucache.py b/tests/test_lfucache.py index 
f9e4e01..9f0de48 100644 --- a/tests/test_lfucache.py +++ b/tests/test_lfucache.py @@ -12,20 +12,24 @@ class LFUCacheTest(unittest.TestCase): def test_insert(self): cache = LFUCache(maxsize=2) - cache['a'] = 0 + cache['a'] = 1 + cache['a'] cache['b'] = 2 cache['c'] = 3 + self.assertEqual(len(cache), 2) self.assertEqual(cache['a'], 1) self.assertTrue('b' in cache or 'c' in cache) self.assertTrue('b' not in cache or 'c' not in cache) - cache['a'] = 4 - self.assertEqual(cache['a'], 4) + cache['d'] = 4 + self.assertEqual(len(cache), 2) + self.assertEqual(cache['d'], 4) + self.assertEqual(cache['a'], 1) def test_decorator(self): self.assertEqual(cached(1), 1) - self.assertItemsEqual(cached.cache_info(), [0, 1, 2, 1]) + self.assertEqual(cached.cache_info(), (0, 1, 2, 1)) self.assertEqual(cached(1), 1) - self.assertItemsEqual(cached.cache_info(), [1, 1, 2, 1]) + self.assertEqual(cached.cache_info(), (1, 1, 2, 1)) diff --git a/tests/test_lrucache.py b/tests/test_lrucache.py index d5eea5d..da1104d 100644 --- a/tests/test_lrucache.py +++ b/tests/test_lrucache.py @@ -12,26 +12,31 @@ class LRUCacheTest(unittest.TestCase): def test_insert(self): cache = LRUCache(maxsize=2) + cache['a'] = 1 cache['b'] = 2 cache['c'] = 3 + self.assertEqual(len(cache), 2) self.assertEqual(cache['b'], 2) self.assertEqual(cache['c'], 3) self.assertNotIn('a', cache) - cache['a'] = 4 - self.assertEqual(cache['a'], 4) - self.assertEqual(cache['c'], 3) - self.assertNotIn('b', cache) + cache['b'] + cache['d'] = 4 + self.assertEqual(len(cache), 2) + self.assertEqual(cache['b'], 2) + self.assertEqual(cache['d'], 4) + self.assertNotIn('c', cache) - cache['b'] = 5 - self.assertEqual(cache['b'], 5) - self.assertEqual(cache['c'], 3) - self.assertNotIn('a', cache) + cache['e'] = 5 + self.assertEqual(len(cache), 2) + self.assertEqual(cache['d'], 4) + self.assertEqual(cache['e'], 5) + self.assertNotIn('b', cache) def test_decorator(self): self.assertEqual(cached(1), 1) - self.assertItemsEqual(cached.cache_info(), 
[0, 1, 2, 1]) + self.assertEqual(cached.cache_info(), (0, 1, 2, 1)) self.assertEqual(cached(1), 1) - self.assertItemsEqual(cached.cache_info(), [1, 1, 2, 1]) + self.assertEqual(cached.cache_info(), (1, 1, 2, 1)) diff --git a/tests/test_rrcache.py b/tests/test_rrcache.py new file mode 100644 index 0000000..c4bb7d2 --- /dev/null +++ b/tests/test_rrcache.py @@ -0,0 +1,29 @@ +import unittest + +from cachetools import RRCache, rr_cache + + +@rr_cache(maxsize=2) +def cached(n): + return n + + +class RRCacheTest(unittest.TestCase): + + def test_insert(self): + cache = RRCache(maxsize=2) + + cache['a'] = 1 + cache['b'] = 2 + cache['c'] = 3 + + self.assertEqual(len(cache), 2) + self.assertTrue('a' in cache or ('b' in cache and 'c' in cache)) + self.assertTrue('b' in cache or ('a' in cache and 'c' in cache)) + self.assertTrue('c' in cache or ('a' in cache and 'b' in cache)) + + def test_decorator(self): + self.assertEqual(cached(1), 1) + self.assertEqual(cached.cache_info(), (0, 1, 2, 1)) + self.assertEqual(cached(1), 1) + self.assertEqual(cached.cache_info(), (1, 1, 2, 1)) -- cgit v1.2.3 From 3b9d81924ae6b4011048096cf12a7cdad5508f30 Mon Sep 17 00:00:00 2001 From: Thomas Kemmer Date: Thu, 27 Mar 2014 06:23:04 +0100 Subject: Prepare v0.2.0 --- .coveragerc | 5 -- Changes | 5 ++ LICENSE | 20 ++++++++ MANIFEST.in | 2 +- MIT-LICENSE | 20 -------- README.rst | 4 +- cachetools.py | 128 ++++++++++++++++++++++++++----------------------- docs/index.rst | 84 +++++++++++++++++++++++++++----- tests/test_cache.py | 41 ++++++++++++++++ tests/test_lfucache.py | 21 ++++++++ tests/test_lrucache.py | 22 +++++++++ tests/test_rrcache.py | 21 ++++++++ 12 files changed, 273 insertions(+), 100 deletions(-) delete mode 100644 .coveragerc create mode 100644 LICENSE delete mode 100644 MIT-LICENSE create mode 100644 tests/test_cache.py diff --git a/.coveragerc b/.coveragerc deleted file mode 100644 index e77617c..0000000 --- a/.coveragerc +++ /dev/null @@ -1,5 +0,0 @@ -[report] -omit = - */pyshared/* - 
*/python?.?/* - */site-packages/nose/* diff --git a/Changes b/Changes index 67af7e8..310f0e1 100644 --- a/Changes +++ b/Changes @@ -1,3 +1,8 @@ +0.2.0 2014-04-02 + +* Add @cache decorator. +* Update documentation. + 0.1.0 2014-03-27 * Initial release. diff --git a/LICENSE b/LICENSE new file mode 100644 index 0000000..380c344 --- /dev/null +++ b/LICENSE @@ -0,0 +1,20 @@ +The MIT License (MIT) + +Copyright (c) 2014 Thomas Kemmer + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
diff --git a/MANIFEST.in b/MANIFEST.in index 5217935..c9346d9 100644 --- a/MANIFEST.in +++ b/MANIFEST.in @@ -1,6 +1,6 @@ include Changes +include LICENSE include MANIFEST.in -include MIT-LICENSE include README.rst recursive-include tests *.py diff --git a/MIT-LICENSE b/MIT-LICENSE deleted file mode 100644 index 380c344..0000000 --- a/MIT-LICENSE +++ /dev/null @@ -1,20 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2014 Thomas Kemmer - -Permission is hereby granted, free of charge, to any person obtaining a copy of -this software and associated documentation files (the "Software"), to deal in -the Software without restriction, including without limitation the rights to -use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of -the Software, and to permit persons to whom the Software is furnished to do so, -subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS -FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR -COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/README.rst b/README.rst index bb1067b..cd5ea58 100644 --- a/README.rst +++ b/README.rst @@ -31,7 +31,7 @@ which item(s) to discard based on a suitable `cache algorithm`_. This module provides various cache implementations based on different cache algorithms, as well as decorators for easily memoizing function -calls. +calls, and utilities for creating custom cache implementations. Installation @@ -75,4 +75,4 @@ Licensed under the `MIT License`_. .. _Source Code: https://github.com/tkem/cachetools/ .. 
_Issue Tracker: https://github.com/tkem/cachetools/issues/ .. _Change Log: http://raw.github.com/tkem/cachetools/master/Changes -.. _MIT License: http://raw.github.com/tkem/cachetools/master/MIT-LICENSE +.. _MIT License: http://raw.github.com/tkem/cachetools/master/LICENSE diff --git a/cachetools.py b/cachetools.py index c0dc19b..33b080f 100644 --- a/cachetools.py +++ b/cachetools.py @@ -8,53 +8,63 @@ try: except ImportError: from dummy_threading import RLock -__version__ = '0.1.0' +__version__ = '0.2.0' -class Cache(collections.MutableMapping): +def cache(cls): + """Class decorator that wraps any mutable mapping to work as a + cache.""" - def __init__(self, maxsize, wrapped=None): - self.__wrapped__ = {} if wrapped is None else wrapped - self.maxsize = maxsize + class Cache(collections.MutableMapping): - def __getitem__(self, key): - return self.__wrapped__[key] + __wrapped__ = cls - def __setitem__(self, key, value): - while len(self) >= self.maxsize: - self.popitem() - self.__wrapped__[key] = value + def __init__(self, maxsize, *args, **kwargs): + self.__wrapped__ = cls(*args, **kwargs) + self.maxsize = maxsize - def __delitem__(self, key): - del self.__wrapped__[key] + def __getitem__(self, key): + return self.__wrapped__[key] + + def __setitem__(self, key, value): + while len(self) >= self.maxsize: + self.popitem() + self.__wrapped__[key] = value + + def __delitem__(self, key): + del self.__wrapped__[key] + + def __iter__(self): + return iter(self.__wrapped__) + + def __len__(self): + return len(self.__wrapped__) - def __iter__(self): - return iter(self.__wrapped__) + def __repr__(self): + return '%s(%r, maxsize=%d)' % ( + self.__class__.__name__, + self.__wrapped__, + self.__maxsize, + ) - def __len__(self): - return len(self.__wrapped__) + @property + def maxsize(self): + return self.__maxsize - def __repr__(self): - return '%s(%r, maxsize=%d)' % ( - self.__class__.__name__, - self.__wrapped__, - self.__maxsize, - ) + @maxsize.setter + def maxsize(self, 
value): + if not value > 0: + raise ValueError('maxsize must be > 0') + while (len(self) > value): + self.popitem() + self.__maxsize = value - @property - def maxsize(self): - return self.__maxsize + # TODO: functools.update_wrapper() for class decorators? - @maxsize.setter - def maxsize(self, value): - if value < 1: - raise ValueError('maxsize must be >= 1') - while (len(self) > value): - self.popitem() - self.__maxsize = value + return Cache -class LRUCache(Cache): +class LRUCache(cache(collections.OrderedDict)): """Least Recently Used (LRU) cache implementation. Discards the least recently used items first to make space when @@ -66,25 +76,21 @@ class LRUCache(Cache): # OrderedDict.move_to_end is only available in Python 3 if hasattr(collections.OrderedDict, 'move_to_end'): - def __update(self, key): + def __getitem__(self, key): + value = self.__wrapped__[key] self.__wrapped__.move_to_end(key) + return value else: - def __update(self, key): - self.__wrapped__[key] = self.__wrapped__.pop(key) - - def __init__(self, maxsize): - Cache.__init__(self, maxsize, collections.OrderedDict()) - - def __getitem__(self, key): - value = Cache.__getitem__(self, key) - self.__update(key) - return value + def __getitem__(self, key): + value = self.__wrapped__.pop(key) + self.__wrapped__[key] = value + return value def popitem(self): return self.__wrapped__.popitem(False) -class LFUCache(Cache): +class LFUCache(cache(dict)): """Least Frequently Used (LFU) cache implementation. Counts how often an item is needed, and discards the items used @@ -94,30 +100,33 @@ class LFUCache(Cache): track of usage counts. 
""" - def __init__(self, maxsize): - Cache.__init__(self, maxsize) + def __init__(self, maxsize, *args, **kwargs): + super(LFUCache, self).__init__(maxsize, *args, **kwargs) self.__counter = collections.Counter() def __getitem__(self, key): - value = Cache.__getitem__(self, key) + value = super(LFUCache, self).__getitem__(key) self.__counter[key] += 1 return value def __setitem__(self, key, value): - Cache.__setitem__(self, key, value) + super(LFUCache, self).__setitem__(key, value) self.__counter[key] += 0 def __delitem__(self, key): - Cache.__delitem__(self, key) + super(LFUCache, self).__delitem__(key) del self.__counter[key] def popitem(self): - item = self.__counter.most_common()[-1] - self.pop(item[0]) + try: + item = self.__counter.most_common()[-1] + except IndexError: + raise KeyError + super(LFUCache, self).pop(item[0]) return item -class RRCache(Cache): +class RRCache(cache(dict)): """Random Replacement (RR) cache implementation. Randomly selects a candidate item and discards it to make space @@ -127,11 +136,11 @@ class RRCache(Cache): to be discarded. """ - def __init__(self, maxsize): - Cache.__init__(self, maxsize) - def popitem(self): - item = random.choice(list(self.items())) + try: + item = random.choice(list(self.items())) + except IndexError: + raise KeyError self.pop(item[0]) return item @@ -154,7 +163,6 @@ def _cachedfunc(cache, makekey, lock): def decorator(func): count = [0, 0] - @functools.wraps(func) def wrapper(*args, **kwargs): key = makekey(args, kwargs) with lock: @@ -177,7 +185,7 @@ def _cachedfunc(cache, makekey, lock): wrapper.cache_info = cache_info wrapper.cache_clear = cache_clear - return wrapper + return functools.update_wrapper(wrapper, func) return decorator diff --git a/docs/index.rst b/docs/index.rst index 8b1866d..47440b6 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -3,9 +3,9 @@ .. 
module:: cachetools -This module provides various memoizing collections and function -decorators, including a variant of the Python 3 Standard Library -:func:`functools.lru_cache` decorator. +This module provides various memoizing collections and decorators, +including a variant of the Python 3 Standard Library +:func:`functools.lru_cache` function decorator. .. code-block:: pycon @@ -33,10 +33,10 @@ which item(s) to discard based on a suitable `cache algorithm`_. This module provides various cache implementations based on different cache algorithms, as well as decorators for easily memoizing function -calls. +calls, and utilities for creating custom cache implementations. -Cache Classes +Cache Implementations ------------------------------------------------------------------------ .. autoclass:: LRUCache @@ -56,23 +56,83 @@ This module provides several memoizing function decorators compatible with --- though not necessarily as efficient as --- the Python 3 Standard Library :func:`functools.lru_cache` decorator. -All decorators feature two optional arguments, which should be -specified as keyword arguments for compatibility with future -extensions: +In addition to a `maxsize` parameter, all decorators feature two +optional arguments, which should be specified as keyword arguments for +compatibility with future extensions: If `typed` is set to :const:`True`, function arguments of different types will be cached separately. `lock` specifies a function of zero arguments that returns a `context -manager`_ to lock the cache when necessary. If not specified, a +manager`_ to lock the cache when necessary. If not specified, :class:`threading.RLock` will be used for synchronizing access from multiple threads. -.. autofunction:: lru_cache +The wrapped function is instrumented with :func:`cache_info` and +:func:`cache_clear` functions to provide information about cache +performance and clear the cache. See the :func:`functools.lru_cache` +documentation for details. -.. 
autofunction:: lfu_cache +Unlike :func:`functools.lru_cache`, setting `maxsize` to zero or +:const:`None` is not supported. -.. autofunction:: rr_cache +.. decorator:: lru_cache(maxsize=128, typed=False, lock=threading.RLock) + + Decorator to wrap a function with a memoizing callable that saves + up to the `maxsize` most recent calls based on a Least Recently + Used (LRU) algorithm. + +.. decorator:: lfu_cache(maxsize=128, typed=False, lock=threading.RLock) + + Decorator to wrap a function with a memoizing callable that saves + up to the `maxsize` most recent calls based on a Least Frequently + Used (LFU) algorithm. + +.. decorator:: rr_cache(maxsize=128, typed=False, lock=threading.RLock) + + Decorator to wrap a function with a memoizing callable that saves + up to the `maxsize` most recent calls based on a Random Replacement + (RR) algorithm. + + +Class Decorators +------------------------------------------------------------------------ + +.. decorator:: cache + + Class decorator that wraps any mutable mapping to work as a cache. + + This class decorator may be useful when implementing new cache + classes. It converts any mutable mapping into a cache-like class + with a :attr:`maxsize` attribute. If :func:`__setitem__` is called + when the cache is full, i.e. ``len(self) == self.maxsize``, + :func:`popitem` is invoked to make room for new items:: + + @cache + class DictCache(dict): + pass + + c = DictCache(maxsize=2) + c['x'] = 1 + c['y'] = 2 + c['z'] = 3 # calls dict.popitem(c) + + The original underlying class or object is accessible through the + :attr:`__wrapped__` attribute. This is useful for subclasses that + need to access the original mapping object directly, e.g. to + implement their own version of :func:`popitem`. 
+ + It is also possible, and arguably more comprehensible, to use the + wrapper class as a base class:: + + class OrderedDictCache(cache(collections.OrderedDict)): + def popitem(self): + return self.__wrapped__.popitem(last=False) # pop first item + + c = OrderedDictCache(maxsize=2) + c['x'] = 1 + c['y'] = 2 + c['z'] = 3 # removes 'x' .. _mapping: http://docs.python.org/dev/glossary.html#term-mapping diff --git a/tests/test_cache.py b/tests/test_cache.py new file mode 100644 index 0000000..5c32f2b --- /dev/null +++ b/tests/test_cache.py @@ -0,0 +1,41 @@ +import unittest + +import cachetools +import collections + + +@cachetools.cache +class DictCache(dict): + pass + + +@cachetools.cache +class OrderedDictCache(collections.OrderedDict): + pass + + +class CacheTest(unittest.TestCase): + + def test_dict_cache(self): + cache = DictCache(maxsize=2) + + cache['a'] = 1 + cache['b'] = 2 + cache['c'] = 3 + + self.assertEqual(len(cache), 2) + self.assertTrue('a' in cache or ('b' in cache and 'c' in cache)) + self.assertTrue('b' in cache or ('a' in cache and 'c' in cache)) + self.assertTrue('c' in cache or ('a' in cache and 'b' in cache)) + + def test_ordered_dict_cache(self): + cache = OrderedDictCache(maxsize=2) + + cache['a'] = 1 + cache['b'] = 2 + cache['c'] = 3 + + self.assertEqual(len(cache), 2) + self.assertNotIn('a', cache) + self.assertEqual(cache['b'], 2) + self.assertEqual(cache['c'], 3) diff --git a/tests/test_lfucache.py b/tests/test_lfucache.py index 9f0de48..d414e20 100644 --- a/tests/test_lfucache.py +++ b/tests/test_lfucache.py @@ -8,6 +8,11 @@ def cached(n): return n +@lfu_cache(maxsize=2, typed=True) +def cached_typed(n): + return n + + class LFUCacheTest(unittest.TestCase): def test_insert(self): @@ -33,3 +38,19 @@ class LFUCacheTest(unittest.TestCase): self.assertEqual(cached.cache_info(), (0, 1, 2, 1)) self.assertEqual(cached(1), 1) self.assertEqual(cached.cache_info(), (1, 1, 2, 1)) + self.assertEqual(cached(1.0), 1.0) + 
self.assertEqual(cached.cache_info(), (2, 1, 2, 1)) + + cached.cache_clear() + self.assertEqual(cached(1), 1) + self.assertEqual(cached.cache_info(), (2, 2, 2, 1)) + + def test_typed_decorator(self): + self.assertEqual(cached_typed(1), 1) + self.assertEqual(cached_typed.cache_info(), (0, 1, 2, 1)) + self.assertEqual(cached_typed(1), 1) + self.assertEqual(cached_typed.cache_info(), (1, 1, 2, 1)) + self.assertEqual(cached_typed(1.0), 1.0) + self.assertEqual(cached_typed.cache_info(), (1, 2, 2, 2)) + self.assertEqual(cached_typed(1.0), 1.0) + self.assertEqual(cached_typed.cache_info(), (2, 2, 2, 2)) diff --git a/tests/test_lrucache.py b/tests/test_lrucache.py index da1104d..a4ceb6e 100644 --- a/tests/test_lrucache.py +++ b/tests/test_lrucache.py @@ -8,6 +8,11 @@ def cached(n): return n +@lru_cache(maxsize=2, typed=True) +def cached_typed(n): + return n + + class LRUCacheTest(unittest.TestCase): def test_insert(self): @@ -36,7 +41,24 @@ class LRUCacheTest(unittest.TestCase): self.assertNotIn('b', cache) def test_decorator(self): + self.assertEqual(cached.cache_info(), (0, 0, 2, 0)) self.assertEqual(cached(1), 1) self.assertEqual(cached.cache_info(), (0, 1, 2, 1)) self.assertEqual(cached(1), 1) self.assertEqual(cached.cache_info(), (1, 1, 2, 1)) + self.assertEqual(cached(1.0), 1.0) + self.assertEqual(cached.cache_info(), (2, 1, 2, 1)) + + cached.cache_clear() + self.assertEqual(cached(1), 1) + self.assertEqual(cached.cache_info(), (2, 2, 2, 1)) + + def test_typed_decorator(self): + self.assertEqual(cached_typed(1), 1) + self.assertEqual(cached_typed.cache_info(), (0, 1, 2, 1)) + self.assertEqual(cached_typed(1), 1) + self.assertEqual(cached_typed.cache_info(), (1, 1, 2, 1)) + self.assertEqual(cached_typed(1.0), 1.0) + self.assertEqual(cached_typed.cache_info(), (1, 2, 2, 2)) + self.assertEqual(cached_typed(1.0), 1.0) + self.assertEqual(cached_typed.cache_info(), (2, 2, 2, 2)) diff --git a/tests/test_rrcache.py b/tests/test_rrcache.py index c4bb7d2..f8eb534 100644 --- 
a/tests/test_rrcache.py +++ b/tests/test_rrcache.py @@ -8,6 +8,11 @@ def cached(n): return n +@rr_cache(maxsize=2, typed=True) +def cached_typed(n): + return n + + class RRCacheTest(unittest.TestCase): def test_insert(self): @@ -27,3 +32,19 @@ class RRCacheTest(unittest.TestCase): self.assertEqual(cached.cache_info(), (0, 1, 2, 1)) self.assertEqual(cached(1), 1) self.assertEqual(cached.cache_info(), (1, 1, 2, 1)) + self.assertEqual(cached(1.0), 1.0) + self.assertEqual(cached.cache_info(), (2, 1, 2, 1)) + + cached.cache_clear() + self.assertEqual(cached(1), 1) + self.assertEqual(cached.cache_info(), (2, 2, 2, 1)) + + def test_typed_decorator(self): + self.assertEqual(cached_typed(1), 1) + self.assertEqual(cached_typed.cache_info(), (0, 1, 2, 1)) + self.assertEqual(cached_typed(1), 1) + self.assertEqual(cached_typed.cache_info(), (1, 1, 2, 1)) + self.assertEqual(cached_typed(1.0), 1.0) + self.assertEqual(cached_typed.cache_info(), (1, 2, 2, 2)) + self.assertEqual(cached_typed(1.0), 1.0) + self.assertEqual(cached_typed.cache_info(), (2, 2, 2, 2)) -- cgit v1.2.3 From 573b318c65abe41f03cf2269356182e19f11caa1 Mon Sep 17 00:00:00 2001 From: Thomas Kemmer Date: Wed, 2 Apr 2014 20:44:06 +0200 Subject: Update README.rst --- README.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.rst b/README.rst index cd5ea58..073a62b 100644 --- a/README.rst +++ b/README.rst @@ -62,7 +62,7 @@ Project Resources License ------------------------------------------------------------------------ -Copyright 2014 Thomas Kemmer. +Copyright (c) 2014 Thomas Kemmer. Licensed under the `MIT License`_. -- cgit v1.2.3 From 5543675c8e5e7771d48fe81f880ad690c44900f8 Mon Sep 17 00:00:00 2001 From: Thomas Kemmer Date: Tue, 6 May 2014 18:14:33 +0200 Subject: Add size, getsizeof cache members. 
--- cachetools.py | 176 ++++++++++++++++++++++++++----------------------- docs/index.rst | 84 +++++++++-------------- setup.py | 6 +- tests/test_cache.py | 36 +++++----- tests/test_lfucache.py | 22 +++++++ tests/test_lrucache.py | 23 +++++++ tests/test_rrcache.py | 22 +++++++ 7 files changed, 212 insertions(+), 157 deletions(-) diff --git a/cachetools.py b/cachetools.py index 33b080f..781b59f 100644 --- a/cachetools.py +++ b/cachetools.py @@ -1,6 +1,8 @@ """Extensible memoizing collections and decorators""" + import collections import functools +import operator import random try: @@ -8,63 +10,66 @@ try: except ImportError: from dummy_threading import RLock -__version__ = '0.2.0' - - -def cache(cls): - """Class decorator that wraps any mutable mapping to work as a - cache.""" +__version__ = '0.3.0' - class Cache(collections.MutableMapping): - __wrapped__ = cls +class _Cache(collections.MutableMapping): + """Class that wraps a mutable mapping to work as a cache.""" - def __init__(self, maxsize, *args, **kwargs): - self.__wrapped__ = cls(*args, **kwargs) - self.maxsize = maxsize + def __init__(self, mapping, maxsize): + self.__data = mapping + self.__size = sum(map(self.getsizeof, mapping.values()), 0) + self.maxsize = maxsize - def __getitem__(self, key): - return self.__wrapped__[key] + def __getitem__(self, key): + return self.__data[key] - def __setitem__(self, key, value): - while len(self) >= self.maxsize: - self.popitem() - self.__wrapped__[key] = value + def __setitem__(self, key, value): + size = self.getsizeof(value) + if size > self.maxsize: + raise ValueError + while self.size > self.maxsize - size: + self.pop(next(iter(self))) + self.__data[key] = value + self.__size += size - def __delitem__(self, key): - del self.__wrapped__[key] + def __delitem__(self, key): + self.__size -= self.getsizeof(self.__data.pop(key)) - def __iter__(self): - return iter(self.__wrapped__) + def __iter__(self): + return iter(self.__data) - def __len__(self): - return 
len(self.__wrapped__) + def __len__(self): + return len(self.__data) - def __repr__(self): - return '%s(%r, maxsize=%d)' % ( - self.__class__.__name__, - self.__wrapped__, - self.__maxsize, - ) + def __repr__(self): + return '%s(%r, size=%d, maxsize=%d)' % ( + self.__class__.__name__, + self.__data, + self.__size, + self.__maxsize, + ) - @property - def maxsize(self): - return self.__maxsize + @property + def size(self): + return self.__size - @maxsize.setter - def maxsize(self, value): - if not value > 0: - raise ValueError('maxsize must be > 0') - while (len(self) > value): - self.popitem() - self.__maxsize = value + @property + def maxsize(self): + return self.__maxsize - # TODO: functools.update_wrapper() for class decorators? + @maxsize.setter + def maxsize(self, value): + while self.size > value: + self.pop(next(iter(self))) + self.__maxsize = value - return Cache + @staticmethod + def getsizeof(_): + return 1 -class LRUCache(cache(collections.OrderedDict)): +class LRUCache(_Cache): """Least Recently Used (LRU) cache implementation. Discards the least recently used items first to make space when @@ -74,23 +79,30 @@ class LRUCache(cache(collections.OrderedDict)): track of item usage. 
""" - # OrderedDict.move_to_end is only available in Python 3 - if hasattr(collections.OrderedDict, 'move_to_end'): - def __getitem__(self, key): - value = self.__wrapped__[key] - self.__wrapped__.move_to_end(key) - return value - else: - def __getitem__(self, key): - value = self.__wrapped__.pop(key) - self.__wrapped__[key] = value - return value - - def popitem(self): - return self.__wrapped__.popitem(False) - - -class LFUCache(cache(dict)): + class OrderedDict(collections.OrderedDict): + # OrderedDict.move_to_end is only available in Python 3 + if hasattr(collections.OrderedDict, 'move_to_end'): + def __getitem__(self, key, + getitem=collections.OrderedDict.__getitem__): + self.move_to_end(key) + return getitem(self, key) + else: + def __getitem__(self, key, + getitem=collections.OrderedDict.__getitem__, + delitem=collections.OrderedDict.__delitem__, + setitem=collections.OrderedDict.__setitem__): + value = getitem(self, key) + delitem(self, key) + setitem(self, key, value) + return value + + def __init__(self, maxsize, getsizeof=None): + if getsizeof is not None: + self.getsizeof = getsizeof + _Cache.__init__(self, self.OrderedDict(), maxsize) + + +class LFUCache(_Cache): """Least Frequently Used (LFU) cache implementation. Counts how often an item is needed, and discards the items used @@ -100,49 +112,49 @@ class LFUCache(cache(dict)): track of usage counts. 
""" - def __init__(self, maxsize, *args, **kwargs): - super(LFUCache, self).__init__(maxsize, *args, **kwargs) + def __init__(self, maxsize, getsizeof=None): + if getsizeof is not None: + self.getsizeof = getsizeof + _Cache.__init__(self, {}, maxsize) self.__counter = collections.Counter() def __getitem__(self, key): - value = super(LFUCache, self).__getitem__(key) + value = _Cache.__getitem__(self, key) self.__counter[key] += 1 return value def __setitem__(self, key, value): - super(LFUCache, self).__setitem__(key, value) + _Cache.__setitem__(self, key, value) self.__counter[key] += 0 def __delitem__(self, key): - super(LFUCache, self).__delitem__(key) + _Cache.__delitem__(self, key) del self.__counter[key] - def popitem(self): - try: - item = self.__counter.most_common()[-1] - except IndexError: - raise KeyError - super(LFUCache, self).pop(item[0]) - return item + def __iter__(self): + items = reversed(self.__counter.most_common()) + return iter(map(operator.itemgetter(0), items)) -class RRCache(cache(dict)): +class RRCache(_Cache): """Random Replacement (RR) cache implementation. - Randomly selects a candidate item and discards it to make space + Randomly selects candidate items and discards then to make space when necessary. - This implementations uses :func:`random.choice` to select the item - to be discarded. + This implementations uses :func:`random.shuffle` to select the + items to be discarded. 
""" - def popitem(self): - try: - item = random.choice(list(self.items())) - except IndexError: - raise KeyError - self.pop(item[0]) - return item + def __init__(self, maxsize, getsizeof=None): + if getsizeof is not None: + self.getsizeof = getsizeof + _Cache.__init__(self, {}, maxsize) + + def __iter__(self): + keys = list(_Cache.__iter__(self)) + random.shuffle(keys) + return iter(keys) CacheInfo = collections.namedtuple('CacheInfo', 'hits misses maxsize currsize') diff --git a/docs/index.rst b/docs/index.rst index 47440b6..753d4a1 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -14,26 +14,36 @@ including a variant of the Python 3 Standard Library >>> cache['first'] = 1 >>> cache['second'] = 2 >>> cache - LRUCache(OrderedDict([('first', 1), ('second', 2)]), maxsize=2) + LRUCache(OrderedDict([('first', 1), ('second', 2)]), size=2, maxsize=2) >>> cache['third'] = 3 >>> cache - LRUCache(OrderedDict([('second', 2), ('third', 3)]), maxsize=2) + LRUCache(OrderedDict([('second', 2), ('third', 3)]), size=2, maxsize=2) >>> cache['second'] 2 >>> cache - LRUCache(OrderedDict([('third', 3), ('second', 2)]), maxsize=2) + LRUCache(OrderedDict([('third', 3), ('second', 2)]), size=2, maxsize=2) >>> cache['fourth'] = 4 >>> cache - LRUCache(OrderedDict([('second', 2), ('fourth', 4)]), maxsize=2) + LRUCache(OrderedDict([('second', 2), ('fourth', 4)]), size=2, maxsize=2) -For the purpose of this module, a *cache* is a mutable mapping_ of -fixed size, defined by its :attr:`maxsize` attribute. When the cache -is full, i.e. ``len(cache) == cache.maxsize``, the cache must choose -which item(s) to discard based on a suitable `cache algorithm`_. +For the purpose of this module, a *cache* is a mutable_ mapping_ with +additional attributes :attr:`size` and :attr:`maxsize`, which hold the +current and maximum size of the cache, and a (possibly static) method +:meth:`getsizeof`. 
+ +The current size of the cache is the sum of the results of +:meth:`getsizeof` applied to each of the cache's values, +i.e. ``cache.size == sum(map(cache.getsizeof, cache.values()), 0)``. +As a special case, if :meth:`getsizeof` returns :const:`1` +irrespective of its argument, ``cache.size == len(cache)``. + +When the cache is full, i.e. ``cache.size > cache.maxsize``, the cache +must choose which item(s) to discard based on a suitable `cache +algorithm`_. This module provides various cache implementations based on different cache algorithms, as well as decorators for easily memoizing function -calls, and utilities for creating custom cache implementations. +and method calls. Cache Implementations @@ -56,13 +66,18 @@ This module provides several memoizing function decorators compatible with --- though not necessarily as efficient as --- the Python 3 Standard Library :func:`functools.lru_cache` decorator. -In addition to a `maxsize` parameter, all decorators feature two -optional arguments, which should be specified as keyword arguments for +In addition to a `maxsize` parameter, all decorators feature optional +arguments, which should be specified as keyword arguments for compatibility with future extensions: If `typed` is set to :const:`True`, function arguments of different types will be cached separately. +`getsizeof` specifies a function of one argument that will be applied +to each cache value to determine its size. The default value is +:const:`None`, which will assign each element an equal size of +:const:`1`. + `lock` specifies a function of zero arguments that returns a `context manager`_ to lock the cache when necessary. If not specified, :class:`threading.RLock` will be used for synchronizing access from @@ -76,65 +91,26 @@ documentation for details. Unlike :func:`functools.lru_cache`, setting `maxsize` to zero or :const:`None` is not supported. -.. decorator:: lru_cache(maxsize=128, typed=False, lock=threading.RLock) +.. 
decorator:: lru_cache(maxsize=128, typed=False, getsizeof=None, lock=threading.RLock) Decorator to wrap a function with a memoizing callable that saves up to the `maxsize` most recent calls based on a Least Recently Used (LRU) algorithm. -.. decorator:: lfu_cache(maxsize=128, typed=False, lock=threading.RLock) +.. decorator:: lfu_cache(maxsize=128, typed=False, getsizeof=None, lock=threading.RLock) Decorator to wrap a function with a memoizing callable that saves up to the `maxsize` most recent calls based on a Least Frequently Used (LFU) algorithm. -.. decorator:: rr_cache(maxsize=128, typed=False, lock=threading.RLock) +.. decorator:: rr_cache(maxsize=128, typed=False, getsizeof=None, lock=threading.RLock) Decorator to wrap a function with a memoizing callable that saves up to the `maxsize` most recent calls based on a Random Replacement (RR) algorithm. -Class Decorators ------------------------------------------------------------------------- - -.. decorator:: cache - - Class decorator that wraps any mutable mapping to work as a cache. - - This class decorator may be useful when implementing new cache - classes. It converts any mutable mapping into a cache-like class - with a :attr:`maxsize` attribute. If :func:`__setitem__` is called - when the cache is full, i.e. ``len(self) == self.maxsize``, - :func:`popitem` is invoked to make room for new items:: - - @cache - class DictCache(dict): - pass - - c = DictCache(maxsize=2) - c['x'] = 1 - c['y'] = 2 - c['z'] = 3 # calls dict.popitem(c) - - The original underlying class or object is accessible through the - :attr:`__wrapped__` attribute. This is useful for subclasses that - need to access the original mapping object directly, e.g. to - implement their own version of :func:`popitem`. 
- - It is also possible, and arguably more comprehensible, to use the - wrapper class as a base class:: - - class OrderedDictCache(cache(collections.OrderedDict)): - def popitem(self): - return self.__wrapped__.popitem(last=False) # pop first item - - c = OrderedDictCache(maxsize=2) - c['x'] = 1 - c['y'] = 2 - c['z'] = 3 # removes 'x' - - +.. _mutable: http://docs.python.org/dev/glossary.html#term-mutable .. _mapping: http://docs.python.org/dev/glossary.html#term-mapping .. _cache algorithm: http://en.wikipedia.org/wiki/Cache_algorithms .. _context manager: http://docs.python.org/dev/glossary.html#term-context-manager diff --git a/setup.py b/setup.py index f0cfd7b..7277345 100644 --- a/setup.py +++ b/setup.py @@ -16,9 +16,9 @@ setup( license='MIT', description='Extensible memoizing collections and decorators', # noqa long_description=open('README.rst').read(), - keywords='cache caching lru lfu ttl', + keywords='cache caching LRU LFU', classifiers=[ - 'Development Status :: 3 - Alpha', + 'Development Status :: 4 - Beta', 'Environment :: Other Environment', 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', @@ -27,7 +27,7 @@ setup( 'Programming Language :: Python :: 2', 'Programming Language :: Python :: 2.7', 'Programming Language :: Python :: 3', - 'Programming Language :: Python :: 3.2', + 'Programming Language :: Python :: 3.4', 'Topic :: Internet', 'Topic :: Software Development :: Libraries :: Python Modules' ], diff --git a/tests/test_cache.py b/tests/test_cache.py index 5c32f2b..e2a58e6 100644 --- a/tests/test_cache.py +++ b/tests/test_cache.py @@ -4,38 +4,38 @@ import cachetools import collections -@cachetools.cache -class DictCache(dict): - pass - - -@cachetools.cache -class OrderedDictCache(collections.OrderedDict): - pass - - class CacheTest(unittest.TestCase): def test_dict_cache(self): - cache = DictCache(maxsize=2) + cache = cachetools._Cache({'a': 1, 'b': 2}, maxsize=2) + + self.assertEqual(len(cache), 2) + 
self.assertEqual(cache['a'], 1) + self.assertEqual(cache['b'], 2) - cache['a'] = 1 - cache['b'] = 2 cache['c'] = 3 self.assertEqual(len(cache), 2) - self.assertTrue('a' in cache or ('b' in cache and 'c' in cache)) - self.assertTrue('b' in cache or ('a' in cache and 'c' in cache)) - self.assertTrue('c' in cache or ('a' in cache and 'b' in cache)) + self.assertTrue('a' in cache or 'b' in cache) + self.assertEqual(cache['c'], 3) + + cache.maxsize = 1 + + self.assertEqual(len(cache), 1) + self.assertTrue('a' in cache or 'b' in cache or 'c' in cache) def test_ordered_dict_cache(self): - cache = OrderedDictCache(maxsize=2) + cache = cachetools._Cache(collections.OrderedDict(), maxsize=2) cache['a'] = 1 cache['b'] = 2 cache['c'] = 3 self.assertEqual(len(cache), 2) - self.assertNotIn('a', cache) self.assertEqual(cache['b'], 2) self.assertEqual(cache['c'], 3) + + cache.maxsize = 1 + + self.assertEqual(len(cache), 1) + self.assertEqual(cache['c'], 3) diff --git a/tests/test_lfucache.py b/tests/test_lfucache.py index d414e20..cfdf0ec 100644 --- a/tests/test_lfucache.py +++ b/tests/test_lfucache.py @@ -33,6 +33,28 @@ class LFUCacheTest(unittest.TestCase): self.assertEqual(cache['d'], 4) self.assertEqual(cache['a'], 1) + def test_getsizeof(self): + cache = LFUCache(maxsize=3, getsizeof=lambda x: x) + + cache['a'] = 1 + cache['b'] = 2 + + self.assertEqual(len(cache), 2) + self.assertEqual(cache['a'], 1) + self.assertEqual(cache['b'], 2) + + cache['c'] = 3 + + self.assertEqual(len(cache), 1) + self.assertEqual(cache['c'], 3) + self.assertNotIn('a', cache) + self.assertNotIn('b', cache) + + with self.assertRaises(ValueError): + cache['d'] = 4 + self.assertEqual(len(cache), 1) + self.assertEqual(cache['c'], 3) + def test_decorator(self): self.assertEqual(cached(1), 1) self.assertEqual(cached.cache_info(), (0, 1, 2, 1)) diff --git a/tests/test_lrucache.py b/tests/test_lrucache.py index a4ceb6e..21cc798 100644 --- a/tests/test_lrucache.py +++ b/tests/test_lrucache.py @@ -40,6 +40,29 
@@ class LRUCacheTest(unittest.TestCase): self.assertEqual(cache['e'], 5) self.assertNotIn('b', cache) + def test_getsizeof(self): + cache = LRUCache(maxsize=3, getsizeof=lambda x: x) + + cache['a'] = 1 + cache['b'] = 2 + + self.assertEqual(len(cache), 2) + self.assertEqual(cache['a'], 1) + self.assertEqual(cache['b'], 2) + + cache['c'] = 3 + + self.assertEqual(len(cache), 1) + self.assertEqual(cache['c'], 3) + self.assertNotIn('a', cache) + self.assertNotIn('b', cache) + + with self.assertRaises(ValueError): + cache['d'] = 4 + self.assertEqual(len(cache), 1) + self.assertEqual(cache['c'], 3) + + def test_decorator(self): self.assertEqual(cached.cache_info(), (0, 0, 2, 0)) self.assertEqual(cached(1), 1) diff --git a/tests/test_rrcache.py b/tests/test_rrcache.py index f8eb534..088eedf 100644 --- a/tests/test_rrcache.py +++ b/tests/test_rrcache.py @@ -27,6 +27,28 @@ class RRCacheTest(unittest.TestCase): self.assertTrue('b' in cache or ('a' in cache and 'c' in cache)) self.assertTrue('c' in cache or ('a' in cache and 'b' in cache)) + def test_getsizeof(self): + cache = RRCache(maxsize=3, getsizeof=lambda x: x) + + cache['a'] = 1 + cache['b'] = 2 + + self.assertEqual(len(cache), 2) + self.assertEqual(cache['a'], 1) + self.assertEqual(cache['b'], 2) + + cache['c'] = 3 + + self.assertEqual(len(cache), 1) + self.assertEqual(cache['c'], 3) + self.assertNotIn('a', cache) + self.assertNotIn('b', cache) + + with self.assertRaises(ValueError): + cache['d'] = 4 + self.assertEqual(len(cache), 1) + self.assertEqual(cache['c'], 3) + def test_decorator(self): self.assertEqual(cached(1), 1) self.assertEqual(cached.cache_info(), (0, 1, 2, 1)) -- cgit v1.2.3 From 4953b0be1f852c607d95e5725327d2dce9f6337e Mon Sep 17 00:00:00 2001 From: Thomas Kemmer Date: Tue, 6 May 2014 18:57:09 +0200 Subject: Add cachedmethod decorator. 
--- cachetools.py | 24 ++++++++++++++++++++++++ docs/index.rst | 8 ++++++++ tests/test_lrucache.py | 1 - tests/test_method.py | 49 +++++++++++++++++++++++++++++++++++++++++++++++++ 4 files changed, 81 insertions(+), 1 deletion(-) create mode 100644 tests/test_method.py diff --git a/cachetools.py b/cachetools.py index 781b59f..06240bc 100644 --- a/cachetools.py +++ b/cachetools.py @@ -233,3 +233,27 @@ def rr_cache(maxsize=128, typed=False, lock=RLock): return _cachedfunc(RRCache(maxsize), _makekey_typed, lock()) else: return _cachedfunc(RRCache(maxsize), _makekey, lock()) + + +def cachedmethod(getcache, typed=False): + """Decorator to wrap a class or instance method with a memoizing + callable. + + """ + + makekey = _makekey_typed if typed else _makekey + + def decorator(method): + def wrapper(self, *args, **kwargs): + cache = getcache(self) + key = makekey((self, method) + args, kwargs) + try: + return cache[key] + except KeyError: + result = method(self, *args, **kwargs) + cache[key] = result + return result + + return functools.update_wrapper(wrapper, method) + + return decorator diff --git a/docs/index.rst b/docs/index.rst index 753d4a1..9134e98 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -110,6 +110,14 @@ Unlike :func:`functools.lru_cache`, setting `maxsize` to zero or (RR) algorithm. +Method Decorators +------------------------------------------------------------------------ + +.. decorator:: cachedmethod(getcache, typed=False) + + Decorator to wrap a class or instance method with a memoizing callable. + + .. _mutable: http://docs.python.org/dev/glossary.html#term-mutable .. _mapping: http://docs.python.org/dev/glossary.html#term-mapping .. 
_cache algorithm: http://en.wikipedia.org/wiki/Cache_algorithms diff --git a/tests/test_lrucache.py b/tests/test_lrucache.py index 21cc798..c8487fe 100644 --- a/tests/test_lrucache.py +++ b/tests/test_lrucache.py @@ -62,7 +62,6 @@ class LRUCacheTest(unittest.TestCase): self.assertEqual(len(cache), 1) self.assertEqual(cache['c'], 3) - def test_decorator(self): self.assertEqual(cached.cache_info(), (0, 0, 2, 0)) self.assertEqual(cached(1), 1) diff --git a/tests/test_method.py b/tests/test_method.py new file mode 100644 index 0000000..40ab5ee --- /dev/null +++ b/tests/test_method.py @@ -0,0 +1,49 @@ +import unittest +import operator + +from cachetools import LRUCache, cachedmethod + + +class Cached(object): + + cache = LRUCache(maxsize=2) + + count = 0 + + @cachedmethod(operator.attrgetter('cache')) + def get(self, value): + count = self.count + self.count += 1 + return count + + @cachedmethod(operator.attrgetter('cache'), typed=True) + def get_typed(self, value): + count = self.count + self.count += 1 + return count + + +class MethodTest(unittest.TestCase): + + def test_decorator(self): + cached = Cached() + + self.assertEqual(cached.get(0), 0) + self.assertEqual(cached.get(1), 1) + self.assertEqual(cached.get(1), 1) + self.assertEqual(cached.get(1.0), 1) + self.assertEqual(cached.get(1.0), 1) + + cached.cache.clear() + self.assertEqual(cached.get(1), 2) + + def test_typed_decorator(self): + cached = Cached() + + self.assertEqual(cached.get_typed(0), 0) + self.assertEqual(cached.get_typed(1), 1) + self.assertEqual(cached.get_typed(1), 1) + self.assertEqual(cached.get_typed(1.0), 2) + self.assertEqual(cached.get_typed(1.0), 2) + self.assertEqual(cached.get_typed(0.0), 3) + self.assertEqual(cached.get_typed(0), 4) -- cgit v1.2.3 From bf206e065016d76eb4e1b666c77ad892f329ae00 Mon Sep 17 00:00:00 2001 From: Thomas Kemmer Date: Tue, 6 May 2014 20:26:34 +0200 Subject: Update documentation. 
--- Changes | 6 +++++ README.rst | 49 +++++++++++++++++++++------------- cachetools.py | 84 ++++++++++++++++++++++++++++++---------------------------- docs/index.rst | 39 +++++++++++++++++++++------ 4 files changed, 110 insertions(+), 68 deletions(-) diff --git a/Changes b/Changes index 310f0e1..1fea5e6 100644 --- a/Changes +++ b/Changes @@ -1,3 +1,9 @@ +0.3.0 2014-05-06 + +* Remove @cache decorator. +* Add size, getsizeof members. +* Add @cachedmethod decorator. + 0.2.0 2014-04-02 * Add @cache decorator. diff --git a/README.rst b/README.rst index 073a62b..4b01431 100644 --- a/README.rst +++ b/README.rst @@ -1,9 +1,9 @@ cachetools ======================================================================== -This module provides various memoizing collections and function -decorators, including a variant of the Python 3 Standard Library -`functools.lru_cache`_ decorator. +This module provides various memoizing collections and decorators, +including a variant of the Python 3 Standard Library +`functools.lru_cache` function decorator. .. code-block:: pycon @@ -12,26 +12,36 @@ decorators, including a variant of the Python 3 Standard Library >>> cache['first'] = 1 >>> cache['second'] = 2 >>> cache - LRUCache(OrderedDict([('first', 1), ('second', 2)]), maxsize=2) + LRUCache(OrderedDict([('first', 1), ('second', 2)]), size=2, maxsize=2) >>> cache['third'] = 3 >>> cache - LRUCache(OrderedDict([('second', 2), ('third', 3)]), maxsize=2) + LRUCache(OrderedDict([('second', 2), ('third', 3)]), size=2, maxsize=2) >>> cache['second'] 2 >>> cache - LRUCache(OrderedDict([('third', 3), ('second', 2)]), maxsize=2) + LRUCache(OrderedDict([('third', 3), ('second', 2)]), size=2, maxsize=2) >>> cache['fourth'] = 4 >>> cache - LRUCache(OrderedDict([('second', 2), ('fourth', 4)]), maxsize=2) + LRUCache(OrderedDict([('second', 2), ('fourth', 4)]), size=2, maxsize=2) -For the purpose of this module, a *cache* is a mutable mapping_ of -fixed size, defined by its ``maxsize`` attribute. 
When the cache is -full, i.e. ``len(cache) == cache.maxsize``, the cache must choose -which item(s) to discard based on a suitable `cache algorithm`_. +For the purpose of this module, a *cache* is a mutable_ mapping_ with +additional attributes ``size`` and ``maxsize``, which hold the current +and maximum size of the cache, and a (possibly static) method +``getsizeof``. + +The current size of the cache is the sum of the results of +``getsizeof`` applied to each of the cache's values, i.e. ``cache.size +== sum(map(cache.getsizeof, cache.values()), 0)``. As a special case, +if ``getsizeof`` returns ``1`` irrespective of its argument, +``cache.size == len(cache)``. + +When the cache is full, i.e. ``cache.size > cache.maxsize``, the cache +must choose which item(s) to discard based on a suitable `cache +algorithm`_. This module provides various cache implementations based on different cache algorithms, as well as decorators for easily memoizing function -calls, and utilities for creating custom cache implementations. +and method calls. Installation @@ -45,19 +55,19 @@ Install cachetools using pip:: Project Resources ------------------------------------------------------------------------ -- `Documentation`_ -- `Issue Tracker`_ -- `Source Code`_ -- `Change Log`_ - -.. image:: https://pypip.in/v/cachetools/badge.png +.. image:: http://img.shields.io/pypi/v/cachetools.svg :target: https://pypi.python.org/pypi/cachetools/ :alt: Latest PyPI version -.. image:: https://pypip.in/d/cachetools/badge.png +.. image:: http://img.shields.io/pypi/dm/cachetools.svg :target: https://pypi.python.org/pypi/cachetools/ :alt: Number of PyPI downloads +- `Documentation`_ +- `Issue Tracker`_ +- `Source Code`_ +- `Change Log`_ + License ------------------------------------------------------------------------ @@ -68,6 +78,7 @@ Licensed under the `MIT License`_. .. _functools.lru_cache: http://docs.python.org/3.4/library/functools.html#functools.lru_cache +.. 
_mutable: http://docs.python.org/dev/glossary.html#term-mutable .. _mapping: http://docs.python.org/dev/glossary.html#term-mapping .. _cache algorithm: http://en.wikipedia.org/wiki/Cache_algorithms diff --git a/cachetools.py b/cachetools.py index 06240bc..2edac36 100644 --- a/cachetools.py +++ b/cachetools.py @@ -202,58 +202,60 @@ def _cachedfunc(cache, makekey, lock): return decorator -def lru_cache(maxsize=128, typed=False, lock=RLock): - """Decorator to wrap a function with a memoizing callable that - saves up to the `maxsize` most recent calls based on a Least - Recently Used (LRU) algorithm. - """ - if typed: - return _cachedfunc(LRUCache(maxsize), _makekey_typed, lock()) - else: - return _cachedfunc(LRUCache(maxsize), _makekey, lock()) +def _cachedmeth(getcache, makekey, lock): + def decorator(func): + def wrapper(self, *args, **kwargs): + key = makekey((func,) + args, kwargs) + cache = getcache(self) + with lock: + try: + return cache[key] + except KeyError: + pass + result = func(self, *args, **kwargs) + with lock: + cache[key] = result + return result + return functools.update_wrapper(wrapper, func) + + return decorator -def lfu_cache(maxsize=128, typed=False, lock=RLock): - """Decorator to wrap a function with a memoizing callable that - saves up to the `maxsize` most recent calls based on a Least - Frequently Used (LFU) algorithm. - """ - if typed: - return _cachedfunc(LFUCache(maxsize), _makekey_typed, lock()) - else: - return _cachedfunc(LFUCache(maxsize), _makekey, lock()) +def lru_cache(maxsize=128, typed=False, getsizeof=None, lock=RLock): + """Decorator to wrap a function with a memoizing callable that saves + up to `maxsize` results based on a Least Recently Used (LRU) + algorithm. -def rr_cache(maxsize=128, typed=False, lock=RLock): - """Decorator to wrap a function with a memoizing callable that - saves up to the `maxsize` most recent calls based on a Random - Replacement (RR) algorithm. 
""" - if typed: - return _cachedfunc(RRCache(maxsize), _makekey_typed, lock()) - else: - return _cachedfunc(RRCache(maxsize), _makekey, lock()) + makekey = _makekey_typed if typed else _makekey + return _cachedfunc(LRUCache(maxsize, getsizeof), makekey, lock()) -def cachedmethod(getcache, typed=False): - """Decorator to wrap a class or instance method with a memoizing - callable. +def lfu_cache(maxsize=128, typed=False, getsizeof=None, lock=RLock): + """Decorator to wrap a function with a memoizing callable that saves + up to `maxsize` results based on a Least Frequently Used (LFU) + algorithm. """ + makekey = _makekey_typed if typed else _makekey + return _cachedfunc(LFUCache(maxsize, getsizeof), makekey, lock()) + +def rr_cache(maxsize=128, typed=False, getsizeof=None, lock=RLock): + """Decorator to wrap a function with a memoizing callable that saves + up to `maxsize` results based on a Random Replacement (RR) + algorithm. + + """ makekey = _makekey_typed if typed else _makekey + return _cachedfunc(RRCache(maxsize, getsizeof), makekey, lock()) - def decorator(method): - def wrapper(self, *args, **kwargs): - cache = getcache(self) - key = makekey((self, method) + args, kwargs) - try: - return cache[key] - except KeyError: - result = method(self, *args, **kwargs) - cache[key] = result - return result - return functools.update_wrapper(wrapper, method) +def cachedmethod(getcache, typed=False, lock=RLock): + """Decorator to wrap a class or instance method with a memoizing + callable that saves results in a (possibly shared) cache. - return decorator + """ + makekey = _makekey_typed if typed else _makekey + return _cachedmeth(getcache, makekey, lock()) diff --git a/docs/index.rst b/docs/index.rst index 9134e98..0c78a17 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -94,28 +94,51 @@ Unlike :func:`functools.lru_cache`, setting `maxsize` to zero or .. 
decorator:: lru_cache(maxsize=128, typed=False, getsizeof=None, lock=threading.RLock) Decorator to wrap a function with a memoizing callable that saves - up to the `maxsize` most recent calls based on a Least Recently - Used (LRU) algorithm. + up to `maxsize` results based on a Least Recently Used (LRU) + algorithm. .. decorator:: lfu_cache(maxsize=128, typed=False, getsizeof=None, lock=threading.RLock) Decorator to wrap a function with a memoizing callable that saves - up to the `maxsize` most recent calls based on a Least Frequently - Used (LFU) algorithm. + up to `maxsize` results based on a Least Frequently Used (LFU) + algorithm. .. decorator:: rr_cache(maxsize=128, typed=False, getsizeof=None, lock=threading.RLock) Decorator to wrap a function with a memoizing callable that saves - up to the `maxsize` most recent calls based on a Random Replacement - (RR) algorithm. + up to `maxsize` results based on a Random Replacement (RR) + algorithm. Method Decorators ------------------------------------------------------------------------ -.. decorator:: cachedmethod(getcache, typed=False) +.. decorator:: cachedmethod(getcache, typed=False, lock=threading.RLock) - Decorator to wrap a class or instance method with a memoizing callable. + Decorator to wrap a class or instance method with a memoizing + callable that saves results in a (possibly shared) cache. + + `getcache` specifies a function of one argument that, when passed + :const:`self`, will return the cache object for the instance or + class. See the `Function Decorators`_ section for details on the + other arguments. 
+ + Python 3 example of a shared (class) LRU cache for static web + content:: + + class CachedPEPs(object): + + cache = LRUCache(maxsize=32) + + @cachedmethod(operator.attrgetter('cache')) + def get_pep(self, num): + """Retrieve text of a Python Enhancement Proposal""" + resource = 'http://www.python.org/dev/peps/pep-%04d/' % num + try: + with urllib.request.urlopen(resource) as s: + return s.read() + except urllib.error.HTTPError: + return 'Not Found' .. _mutable: http://docs.python.org/dev/glossary.html#term-mutable -- cgit v1.2.3 From fad89ed62e58f5d543d92b4d96f86b309d9e12ec Mon Sep 17 00:00:00 2001 From: Thomas Kemmer Date: Wed, 7 May 2014 18:22:21 +0200 Subject: Fix #2: Report cache.size in cache_info() --- cachetools.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cachetools.py b/cachetools.py index 2edac36..24609ac 100644 --- a/cachetools.py +++ b/cachetools.py @@ -190,7 +190,7 @@ def _cachedfunc(cache, makekey, lock): return result def cache_info(): - return CacheInfo(count[0], count[1], cache.maxsize, len(cache)) + return CacheInfo(count[0], count[1], cache.maxsize, cache.size) def cache_clear(): cache.clear() -- cgit v1.2.3 From 5aeba23f69d2ee4c394f5f7723546f03199f59b8 Mon Sep 17 00:00:00 2001 From: Thomas Kemmer Date: Wed, 7 May 2014 18:25:02 +0200 Subject: Fix #1: Add proper locking for cache_clear(), cache_info() --- cachetools.py | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/cachetools.py b/cachetools.py index 24609ac..040fae7 100644 --- a/cachetools.py +++ b/cachetools.py @@ -173,27 +173,29 @@ def _makekey_typed(args, kwargs): def _cachedfunc(cache, makekey, lock): def decorator(func): - count = [0, 0] + stats = [0, 0] def wrapper(*args, **kwargs): key = makekey(args, kwargs) with lock: try: result = cache[key] - count[0] += 1 + stats[0] += 1 return result except KeyError: - count[1] += 1 + stats[1] += 1 result = func(*args, **kwargs) with lock: cache[key] = result return result def cache_info(): 
- return CacheInfo(count[0], count[1], cache.maxsize, cache.size) + with lock: + return CacheInfo(stats[0], stats[1], cache.maxsize, cache.size) def cache_clear(): - cache.clear() + with lock: + cache.clear() wrapper.cache_info = cache_info wrapper.cache_clear = cache_clear -- cgit v1.2.3 From a29e4f06015320c2a294aad6a4564f0297e97e49 Mon Sep 17 00:00:00 2001 From: Thomas Kemmer Date: Wed, 7 May 2014 18:28:15 +0200 Subject: Prepare v0.3.1 --- Changes | 5 +++++ cachetools.py | 2 +- 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/Changes b/Changes index 1fea5e6..3aa2519 100644 --- a/Changes +++ b/Changes @@ -1,3 +1,8 @@ +0.3.1 2014-05-07 + +* Add proper locking for cache_clear(), cache_info(). +* Report cache.size in cache_info(). + 0.3.0 2014-05-06 * Remove @cache decorator. diff --git a/cachetools.py b/cachetools.py index 040fae7..ccc42f9 100644 --- a/cachetools.py +++ b/cachetools.py @@ -10,7 +10,7 @@ try: except ImportError: from dummy_threading import RLock -__version__ = '0.3.0' +__version__ = '0.3.1' class _Cache(collections.MutableMapping): -- cgit v1.2.3 From 059216bd26fd6ae308fe1b378c1c3f4aa114d052 Mon Sep 17 00:00:00 2001 From: Thomas Kemmer Date: Mon, 2 Jun 2014 19:05:20 +0200 Subject: Add TTLCache, refactor base class --- cachetools.py | 342 ++++++++++++++++++++++++++++++++++--------------- docs/index.rst | 46 ++++--- tests/__init__.py | 126 ++++++++++++++++++ tests/test_cache.py | 41 +----- tests/test_lfucache.py | 54 ++++---- tests/test_lrucache.py | 64 ++++----- tests/test_rrcache.py | 38 +----- tests/test_ttlcache.py | 75 +++++++++++ 8 files changed, 544 insertions(+), 242 deletions(-) create mode 100644 tests/test_ttlcache.py diff --git a/cachetools.py b/cachetools.py index ccc42f9..24ef617 100644 --- a/cachetools.py +++ b/cachetools.py @@ -4,157 +4,294 @@ import collections import functools import operator import random +import time try: from threading import RLock except ImportError: from dummy_threading import RLock -__version__ = 
'0.3.1' +__version__ = '0.4.0' +_marker = object() -class _Cache(collections.MutableMapping): - """Class that wraps a mutable mapping to work as a cache.""" - def __init__(self, mapping, maxsize): - self.__data = mapping - self.__size = sum(map(self.getsizeof, mapping.values()), 0) - self.maxsize = maxsize +class _Link(object): + __slots__ = 'prev', 'next', 'data' + + +class Cache(collections.MutableMapping): + """Mutable mapping to serve as a cache. + + This class discards arbitrary items using :meth:`popitem` to make + space when necessary. Derived classes may override + :meth:`popitem` to implement specific caching strategies. + + """ + + def __init__(self, maxsize, getsizeof=None): + if getsizeof is not None: + self.getsizeof = getsizeof + self.__mapping = dict() + self.__maxsize = maxsize + self.__currsize = 0 def __getitem__(self, key): - return self.__data[key] + return self.__mapping[key][0] def __setitem__(self, key, value): + mapping = self.__mapping + maxsize = self.__maxsize size = self.getsizeof(value) - if size > self.maxsize: - raise ValueError - while self.size > self.maxsize - size: - self.pop(next(iter(self))) - self.__data[key] = value - self.__size += size + if size > maxsize: + raise ValueError('value too large') + if key not in mapping or mapping[key][1] < size: + while self.__currsize + size > maxsize: + self.popitem() + if key in mapping: + self.__currsize -= mapping[key][1] + mapping[key] = (value, size) + self.__currsize += size def __delitem__(self, key): - self.__size -= self.getsizeof(self.__data.pop(key)) + _, size = self.__mapping.pop(key) + self.__currsize -= size def __iter__(self): - return iter(self.__data) + return iter(self.__mapping) def __len__(self): - return len(self.__data) + return len(self.__mapping) def __repr__(self): - return '%s(%r, size=%d, maxsize=%d)' % ( + return '%s(%r, maxsize=%d, currsize=%d)' % ( self.__class__.__name__, - self.__data, - self.__size, + list(self.items()), self.__maxsize, + self.__currsize, ) - 
@property - def size(self): - return self.__size - @property def maxsize(self): + """Return the maximum size of the cache.""" return self.__maxsize - @maxsize.setter - def maxsize(self, value): - while self.size > value: - self.pop(next(iter(self))) - self.__maxsize = value + @property + def currsize(self): + """Return the current size of the cache.""" + return self.__currsize @staticmethod - def getsizeof(_): + def getsizeof(object): + """Return the size of a cache element.""" return 1 -class LRUCache(_Cache): - """Least Recently Used (LRU) cache implementation. +class RRCache(Cache): + """Random Replacement (RR) cache implementation. - Discards the least recently used items first to make space when - necessary. + This cache randomly selects candidate items and discards them to + make space when necessary. - This implementation uses :class:`collections.OrderedDict` to keep - track of item usage. """ - class OrderedDict(collections.OrderedDict): - # OrderedDict.move_to_end is only available in Python 3 - if hasattr(collections.OrderedDict, 'move_to_end'): - def __getitem__(self, key, - getitem=collections.OrderedDict.__getitem__): - self.move_to_end(key) - return getitem(self, key) - else: - def __getitem__(self, key, - getitem=collections.OrderedDict.__getitem__, - delitem=collections.OrderedDict.__delitem__, - setitem=collections.OrderedDict.__setitem__): - value = getitem(self, key) - delitem(self, key) - setitem(self, key, value) - return value - - def __init__(self, maxsize, getsizeof=None): - if getsizeof is not None: - self.getsizeof = getsizeof - _Cache.__init__(self, self.OrderedDict(), maxsize) + def popitem(self): + """Remove and return a random `(key, value)` pair.""" + try: + key = random.choice(list(self)) + except IndexError: + raise KeyError('cache is empty') + return (key, self.pop(key)) -class LFUCache(_Cache): +class LFUCache(Cache): """Least Frequently Used (LFU) cache implementation. 
- Counts how often an item is needed, and discards the items used - least often to make space when necessary. + This cache counts how often an item is retrieved, and discards the + items used least often to make space when necessary. - This implementation uses :class:`collections.Counter` to keep - track of usage counts. """ def __init__(self, maxsize, getsizeof=None): if getsizeof is not None: - self.getsizeof = getsizeof - _Cache.__init__(self, {}, maxsize) - self.__counter = collections.Counter() + Cache.__init__(self, maxsize, lambda e: getsizeof(e[0])) + else: + Cache.__init__(self, maxsize) - def __getitem__(self, key): - value = _Cache.__getitem__(self, key) - self.__counter[key] += 1 - return value + def __getitem__(self, key, cache_getitem=Cache.__getitem__): + entry = cache_getitem(self, key) + entry[1] += 1 + return entry[0] - def __setitem__(self, key, value): - _Cache.__setitem__(self, key, value) - self.__counter[key] += 0 + def __setitem__(self, key, value, cache_setitem=Cache.__setitem__): + cache_setitem(self, key, [value, 0]) - def __delitem__(self, key): - _Cache.__delitem__(self, key) - del self.__counter[key] - - def __iter__(self): - items = reversed(self.__counter.most_common()) - return iter(map(operator.itemgetter(0), items)) + def popitem(self): + """Remove and return the `(key, value)` pair least frequently used.""" + items = ((key, Cache.__getitem__(self, key)[1]) for key in self) + try: + key, _ = min(items, key=operator.itemgetter(1)) + except ValueError: + raise KeyError('cache is empty') + return (key, self.pop(key)) -class RRCache(_Cache): - """Random Replacement (RR) cache implementation. +class LRUCache(Cache): + """Least Recently Used (LRU) cache implementation. - Randomly selects candidate items and discards then to make space - when necessary. + This cache discards the least recently used items first to make + space when necessary. - This implementations uses :func:`random.shuffle` to select the - items to be discarded. 
""" def __init__(self, maxsize, getsizeof=None): if getsizeof is not None: - self.getsizeof = getsizeof - _Cache.__init__(self, {}, maxsize) + Cache.__init__(self, maxsize, lambda e: getsizeof(e[0])) + else: + Cache.__init__(self, maxsize) + root = _Link() + root.prev = root.next = root + self.__root = root + + def __getitem__(self, key, cache_getitem=Cache.__getitem__): + value, link = cache_getitem(self, key) + root = self.__root + link.prev.next = link.next + link.next.prev = link.prev + link.prev = tail = root.prev + link.next = root + tail.next = root.prev = link + return value - def __iter__(self): - keys = list(_Cache.__iter__(self)) - random.shuffle(keys) - return iter(keys) + def __setitem__(self, key, value, + cache_getitem=Cache.__getitem__, + cache_setitem=Cache.__setitem__): + try: + _, link = cache_getitem(self, key) + except KeyError: + link = _Link() + cache_setitem(self, key, (value, link)) + try: + link.prev.next = link.next + link.next.prev = link.prev + except AttributeError: + link.data = key + root = self.__root + link.prev = tail = root.prev + link.next = root + tail.next = root.prev = link + + def __delitem__(self, key, + cache_getitem=Cache.__getitem__, + cache_delitem=Cache.__delitem__): + _, link = cache_getitem(self, key) + cache_delitem(self, key) + link.prev.next = link.next + link.next.prev = link.prev + del link.next + del link.prev + + def popitem(self): + """Remove and return the `(key, value)` pair least recently used.""" + root = self.__root + link = root.next + if link is root: + raise KeyError('cache is empty') + key = link.data + return (key, self.pop(key)) + + +class TTLCache(LRUCache): + """LRU cache implementation with per-item time-to-live (TTL) value. + + This least-recently-used cache associates a time-to-live value + with each item. Items that expire because they have exceeded + their time-to-live are removed from the cache automatically. 
+ + """ + + def __init__(self, maxsize, ttl, getsizeof=None, timer=time.time): + if getsizeof is not None: + LRUCache.__init__(self, maxsize, lambda e: getsizeof(e[0])) + else: + LRUCache.__init__(self, maxsize) + root = _Link() + root.prev = root.next = root + self.__root = root + self.__timer = timer + self.__ttl = ttl + + def __getitem__(self, key, + cache_getitem=LRUCache.__getitem__, + cache_delitem=LRUCache.__delitem__): + value, link = cache_getitem(self, key) + if self.__timer() < link.data[1]: + return value + root = self.__root + head = root.next + link = link.next + while head is not link: + cache_delitem(self, head.data[0]) + head.next.prev = root + head = root.next = head.next + raise KeyError('%r has expired' % key) + + def __setitem__(self, key, value, + cache_getitem=LRUCache.__getitem__, + cache_setitem=LRUCache.__setitem__, + cache_delitem=LRUCache.__delitem__): + root = self.__root + head = root.next + time = self.__timer() + while head is not root and head.data[1] < time: + cache_delitem(self, head.data[0]) + head.next.prev = root + head = root.next = head.next + try: + _, link = cache_getitem(self, key) + except KeyError: + link = _Link() + cache_setitem(self, key, (value, link)) + try: + link.prev.next = link.next + link.next.prev = link.prev + except AttributeError: + pass + link.data = (key, time + self.__ttl) + link.prev = tail = root.prev + link.next = root + tail.next = root.prev = link + + def __delitem__(self, key, + cache_getitem=LRUCache.__getitem__, + cache_delitem=LRUCache.__delitem__): + _, link = cache_getitem(self, key) + cache_delitem(self, key) + link.prev.next = link.next + link.next.prev = link.prev + + def __repr__(self, cache_getitem=LRUCache.__getitem__): + return '%s(%r, maxsize=%d, currsize=%d)' % ( + self.__class__.__name__, + [(key, cache_getitem(self, key)[0]) for key in self], + self.maxsize, + self.currsize, + ) + + def pop(self, key, default=_marker): + try: + value, link = LRUCache.__getitem__(self, key) + except 
KeyError: + if default is _marker: + raise + else: + return default + LRUCache.__delitem__(self, key) + link.prev.next = link.next + link.next.prev = link.prev + del link.next + del link.prev + return value CacheInfo = collections.namedtuple('CacheInfo', 'hits misses maxsize currsize') @@ -191,7 +328,10 @@ def _cachedfunc(cache, makekey, lock): def cache_info(): with lock: - return CacheInfo(stats[0], stats[1], cache.maxsize, cache.size) + hits, misses = stats + maxsize = cache.maxsize + currsize = cache.currsize + return CacheInfo(hits, misses, maxsize, currsize) def cache_clear(): with lock: @@ -204,19 +344,17 @@ def _cachedfunc(cache, makekey, lock): return decorator -def _cachedmeth(getcache, makekey, lock): +def _cachedmeth(getcache, makekey): def decorator(func): def wrapper(self, *args, **kwargs): key = makekey((func,) + args, kwargs) cache = getcache(self) - with lock: - try: - return cache[key] - except KeyError: - pass + try: + return cache[key] + except KeyError: + pass result = func(self, *args, **kwargs) - with lock: - cache[key] = result + cache[key] = result return result return functools.update_wrapper(wrapper, func) @@ -254,10 +392,10 @@ def rr_cache(maxsize=128, typed=False, getsizeof=None, lock=RLock): return _cachedfunc(RRCache(maxsize, getsizeof), makekey, lock()) -def cachedmethod(getcache, typed=False, lock=RLock): +def cachedmethod(cache, typed=False): """Decorator to wrap a class or instance method with a memoizing callable that saves results in a (possibly shared) cache. 
""" makekey = _makekey_typed if typed else _makekey - return _cachedmeth(getcache, makekey, lock()) + return _cachedmeth(cache, makekey) diff --git a/docs/index.rst b/docs/index.rst index 0c78a17..c0ad084 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -14,7 +14,7 @@ including a variant of the Python 3 Standard Library >>> cache['first'] = 1 >>> cache['second'] = 2 >>> cache - LRUCache(OrderedDict([('first', 1), ('second', 2)]), size=2, maxsize=2) + LRUCache(maxsize=2, currsize=2, items=[('first', 1)]) >>> cache['third'] = 3 >>> cache LRUCache(OrderedDict([('second', 2), ('third', 3)]), size=2, maxsize=2) @@ -26,26 +26,30 @@ including a variant of the Python 3 Standard Library >>> cache LRUCache(OrderedDict([('second', 2), ('fourth', 4)]), size=2, maxsize=2) -For the purpose of this module, a *cache* is a mutable_ mapping_ with -additional attributes :attr:`size` and :attr:`maxsize`, which hold the -current and maximum size of the cache, and a (possibly static) method -:meth:`getsizeof`. +For the purpose of this module, a *cache* is a mutable_ mapping_ of a +fixed maximum *size*. When the cache is full, i.e. the current size +of the cache exceeds its maximum size, the cache must choose which +item(s) to discard based on a suitable `cache algorithm`_. -The current size of the cache is the sum of the results of -:meth:`getsizeof` applied to each of the cache's values, -i.e. ``cache.size == sum(map(cache.getsizeof, cache.values()), 0)``. -As a special case, if :meth:`getsizeof` returns :const:`1` -irrespective of its argument, ``cache.size == len(cache)``. - -When the cache is full, i.e. ``cache.size > cache.maxsize``, the cache -must choose which item(s) to discard based on a suitable `cache -algorithm`_. +In general, a cache's size is the sum of its element's sizes. For the +trivial case, if the size of each element is :const:`1`, a cache's +size is equal to the number of its entries, i.e. :func:`len`. 
An +element's size may also be a property or function of its value, +e.g. the result of :func:`sys.getsizeof`, or :func:`len` for string +and sequence elements. This module provides various cache implementations based on different cache algorithms, as well as decorators for easily memoizing function and method calls. +Cache Base Class +------------------------------------------------------------------------ + +.. autoclass:: Cache + :members: + + Cache Implementations ------------------------------------------------------------------------ @@ -58,6 +62,18 @@ Cache Implementations .. autoclass:: RRCache :members: +.. autoclass:: TTLCache + :members: + + Note that a cache element may expire at *any* time, so the + following *may* raise an exception:: + + cache = TTLCache(100, 1) + ... + for k in cache: + print(cache[k]) + + Function Decorators ------------------------------------------------------------------------ @@ -126,7 +142,7 @@ Method Decorators Python 3 example of a shared (class) LRU cache for static web content:: - class CachedPEPs(object): + class CachedPEPs(object): cache = LRUCache(maxsize=32) diff --git a/tests/__init__.py b/tests/__init__.py index e69de29..4ab4a11 100644 --- a/tests/__init__.py +++ b/tests/__init__.py @@ -0,0 +1,126 @@ +class CacheTestMixin(object): + + def make_cache(self, maxsize, getsizeof=None): + raise NotImplementedError + + def test_insert(self): + cache = self.make_cache(maxsize=2) + + cache.update({1: 1, 2: 2}) + self.assertEqual(2, len(cache)) + self.assertEqual(1, cache[1]) + self.assertEqual(2, cache[2]) + + cache[3] = 3 + self.assertEqual(2, len(cache)) + self.assertEqual(3, cache[3]) + self.assertTrue(1 in cache or 2 in cache) + + cache[4] = 4 + self.assertEqual(2, len(cache)) + self.assertEqual(4, cache[4]) + self.assertTrue(1 in cache or 2 in cache or 3 in cache) + + def test_update(self): + cache = self.make_cache(maxsize=2) + + cache.update({1: 1, 2: 2}) + self.assertEqual(2, len(cache)) + self.assertEqual(1, 
cache[1]) + self.assertEqual(2, cache[2]) + + cache.update({1: 1, 2: 2}) + self.assertEqual(2, len(cache)) + self.assertEqual(1, cache[1]) + self.assertEqual(2, cache[2]) + + cache.update({1: 'a', 2: 'b'}) + self.assertEqual(2, len(cache)) + self.assertEqual('a', cache[1]) + self.assertEqual('b', cache[2]) + + def test_delete(self): + cache = self.make_cache(maxsize=2) + + cache.update({1: 1, 2: 2}) + self.assertEqual(2, len(cache)) + self.assertEqual(1, cache[1]) + self.assertEqual(2, cache[2]) + + del cache[2] + self.assertEqual(1, len(cache)) + self.assertEqual(1, cache[1]) + self.assertNotIn(2, cache) + + del cache[1] + self.assertEqual(0, len(cache)) + self.assertNotIn(1, cache) + self.assertNotIn(2, cache) + + def test_pop(self): + cache = self.make_cache(maxsize=2) + + cache.update({1: 1, 2: 2}) + self.assertEqual(2, cache.pop(2)) + self.assertEqual(1, len(cache)) + self.assertEqual(1, cache.pop(1)) + self.assertEqual(0, len(cache)) + + with self.assertRaises(KeyError): + cache.pop(2) + with self.assertRaises(KeyError): + cache.pop(1) + with self.assertRaises(KeyError): + cache.pop(0) + + self.assertEqual(None, cache.pop(2, None)) + self.assertEqual(None, cache.pop(1, None)) + self.assertEqual(None, cache.pop(0, None)) + + def test_popitem(self): + cache = self.make_cache(maxsize=2) + + cache.update({1: 1, 2: 2}) + self.assertIn(cache.pop(1), {1: 1, 2: 2}) + self.assertEqual(1, len(cache)) + self.assertIn(cache.pop(2), {1: 1, 2: 2}) + self.assertEqual(0, len(cache)) + + with self.assertRaises(KeyError): + cache.popitem() + + def test_getsizeof(self): + cache = self.make_cache(maxsize=3, getsizeof=lambda x: x) + self.assertEqual(3, cache.maxsize) + self.assertEqual(0, cache.currsize) + + cache.update({1: 1, 2: 2}) + self.assertEqual(2, len(cache)) + self.assertEqual(3, cache.currsize) + self.assertEqual(1, cache[1]) + self.assertEqual(2, cache[2]) + + cache[1] = 2 + self.assertEqual(1, len(cache)) + self.assertEqual(2, cache.currsize) + self.assertEqual(2, 
cache[1]) + self.assertNotIn(2, cache) + + cache.update({1: 1, 2: 2}) + self.assertEqual(2, len(cache)) + self.assertEqual(3, cache.currsize) + self.assertEqual(1, cache[1]) + self.assertEqual(2, cache[2]) + + cache[3] = 3 + self.assertEqual(1, len(cache)) + self.assertEqual(3, cache.currsize) + self.assertEqual(3, cache[3]) + self.assertNotIn(1, cache) + self.assertNotIn(2, cache) + + with self.assertRaises(ValueError): + cache[4] = 4 + self.assertEqual(1, len(cache)) + self.assertEqual(3, cache.currsize) + self.assertEqual(3, cache[3]) diff --git a/tests/test_cache.py b/tests/test_cache.py index e2a58e6..60e9220 100644 --- a/tests/test_cache.py +++ b/tests/test_cache.py @@ -1,41 +1,10 @@ import unittest -import cachetools -import collections +from . import CacheTestMixin +from cachetools import Cache -class CacheTest(unittest.TestCase): +class CacheTest(unittest.TestCase, CacheTestMixin): - def test_dict_cache(self): - cache = cachetools._Cache({'a': 1, 'b': 2}, maxsize=2) - - self.assertEqual(len(cache), 2) - self.assertEqual(cache['a'], 1) - self.assertEqual(cache['b'], 2) - - cache['c'] = 3 - - self.assertEqual(len(cache), 2) - self.assertTrue('a' in cache or 'b' in cache) - self.assertEqual(cache['c'], 3) - - cache.maxsize = 1 - - self.assertEqual(len(cache), 1) - self.assertTrue('a' in cache or 'b' in cache or 'c' in cache) - - def test_ordered_dict_cache(self): - cache = cachetools._Cache(collections.OrderedDict(), maxsize=2) - - cache['a'] = 1 - cache['b'] = 2 - cache['c'] = 3 - - self.assertEqual(len(cache), 2) - self.assertEqual(cache['b'], 2) - self.assertEqual(cache['c'], 3) - - cache.maxsize = 1 - - self.assertEqual(len(cache), 1) - self.assertEqual(cache['c'], 3) + def make_cache(self, maxsize, getsizeof=None): + return Cache(maxsize, getsizeof) diff --git a/tests/test_lfucache.py b/tests/test_lfucache.py index cfdf0ec..245793e 100644 --- a/tests/test_lfucache.py +++ b/tests/test_lfucache.py @@ -1,5 +1,6 @@ import unittest +from . 
import CacheTestMixin from cachetools import LFUCache, lfu_cache @@ -13,47 +14,50 @@ def cached_typed(n): return n -class LFUCacheTest(unittest.TestCase): +class LFUCacheTest(unittest.TestCase, CacheTestMixin): - def test_insert(self): - cache = LFUCache(maxsize=2) + def make_cache(self, maxsize, getsizeof=None): + return LFUCache(maxsize, getsizeof) - cache['a'] = 1 - cache['a'] - cache['b'] = 2 - cache['c'] = 3 + def test_lfu_insert(self): + cache = self.make_cache(maxsize=2) + + cache[1] = 1 + cache[1] + cache[2] = 2 + cache[3] = 3 self.assertEqual(len(cache), 2) - self.assertEqual(cache['a'], 1) - self.assertTrue('b' in cache or 'c' in cache) - self.assertTrue('b' not in cache or 'c' not in cache) + self.assertEqual(cache[1], 1) + self.assertTrue(2 in cache or 3 in cache) + self.assertTrue(2 not in cache or 3 not in cache) - cache['d'] = 4 + cache[4] = 4 self.assertEqual(len(cache), 2) - self.assertEqual(cache['d'], 4) - self.assertEqual(cache['a'], 1) + self.assertEqual(cache[4], 4) + self.assertEqual(cache[1], 1) - def test_getsizeof(self): - cache = LFUCache(maxsize=3, getsizeof=lambda x: x) + def test_lfu_getsizeof(self): + cache = self.make_cache(maxsize=3, getsizeof=lambda x: x) - cache['a'] = 1 - cache['b'] = 2 + cache[1] = 1 + cache[2] = 2 self.assertEqual(len(cache), 2) - self.assertEqual(cache['a'], 1) - self.assertEqual(cache['b'], 2) + self.assertEqual(cache[1], 1) + self.assertEqual(cache[2], 2) - cache['c'] = 3 + cache[3] = 3 self.assertEqual(len(cache), 1) - self.assertEqual(cache['c'], 3) - self.assertNotIn('a', cache) - self.assertNotIn('b', cache) + self.assertEqual(cache[3], 3) + self.assertNotIn(1, cache) + self.assertNotIn(2, cache) with self.assertRaises(ValueError): - cache['d'] = 4 + cache[4] = 4 self.assertEqual(len(cache), 1) - self.assertEqual(cache['c'], 3) + self.assertEqual(cache[3], 3) def test_decorator(self): self.assertEqual(cached(1), 1) diff --git a/tests/test_lrucache.py b/tests/test_lrucache.py index c8487fe..242a0e7 100644 
--- a/tests/test_lrucache.py +++ b/tests/test_lrucache.py @@ -1,5 +1,6 @@ import unittest +from . import CacheTestMixin from cachetools import LRUCache, lru_cache @@ -13,54 +14,57 @@ def cached_typed(n): return n -class LRUCacheTest(unittest.TestCase): +class LRUCacheTest(unittest.TestCase, CacheTestMixin): - def test_insert(self): - cache = LRUCache(maxsize=2) + def make_cache(self, maxsize, getsizeof=None): + return LRUCache(maxsize, getsizeof) - cache['a'] = 1 - cache['b'] = 2 - cache['c'] = 3 + def test_lru_insert(self): + cache = self.make_cache(maxsize=2) + + cache[1] = 1 + cache[2] = 2 + cache[3] = 3 self.assertEqual(len(cache), 2) - self.assertEqual(cache['b'], 2) - self.assertEqual(cache['c'], 3) - self.assertNotIn('a', cache) + self.assertEqual(cache[2], 2) + self.assertEqual(cache[3], 3) + self.assertNotIn(1, cache) - cache['b'] - cache['d'] = 4 + cache[2] + cache[4] = 4 self.assertEqual(len(cache), 2) - self.assertEqual(cache['b'], 2) - self.assertEqual(cache['d'], 4) - self.assertNotIn('c', cache) + self.assertEqual(cache[2], 2) + self.assertEqual(cache[4], 4) + self.assertNotIn(3, cache) - cache['e'] = 5 + cache[5] = 5 self.assertEqual(len(cache), 2) - self.assertEqual(cache['d'], 4) - self.assertEqual(cache['e'], 5) - self.assertNotIn('b', cache) + self.assertEqual(cache[4], 4) + self.assertEqual(cache[5], 5) + self.assertNotIn(2, cache) - def test_getsizeof(self): - cache = LRUCache(maxsize=3, getsizeof=lambda x: x) + def test_lru_getsizeof(self): + cache = self.make_cache(maxsize=3, getsizeof=lambda x: x) - cache['a'] = 1 - cache['b'] = 2 + cache[1] = 1 + cache[2] = 2 self.assertEqual(len(cache), 2) - self.assertEqual(cache['a'], 1) - self.assertEqual(cache['b'], 2) + self.assertEqual(cache[1], 1) + self.assertEqual(cache[2], 2) - cache['c'] = 3 + cache[3] = 3 self.assertEqual(len(cache), 1) - self.assertEqual(cache['c'], 3) - self.assertNotIn('a', cache) - self.assertNotIn('b', cache) + self.assertEqual(cache[3], 3) + self.assertNotIn(1, cache) + 
self.assertNotIn(2, cache) with self.assertRaises(ValueError): - cache['d'] = 4 + cache[4] = 4 self.assertEqual(len(cache), 1) - self.assertEqual(cache['c'], 3) + self.assertEqual(cache[3], 3) def test_decorator(self): self.assertEqual(cached.cache_info(), (0, 0, 2, 0)) diff --git a/tests/test_rrcache.py b/tests/test_rrcache.py index 088eedf..207b7fd 100644 --- a/tests/test_rrcache.py +++ b/tests/test_rrcache.py @@ -1,5 +1,6 @@ import unittest +from . import CacheTestMixin from cachetools import RRCache, rr_cache @@ -13,41 +14,10 @@ def cached_typed(n): return n -class RRCacheTest(unittest.TestCase): +class RRCacheTest(unittest.TestCase, CacheTestMixin): - def test_insert(self): - cache = RRCache(maxsize=2) - - cache['a'] = 1 - cache['b'] = 2 - cache['c'] = 3 - - self.assertEqual(len(cache), 2) - self.assertTrue('a' in cache or ('b' in cache and 'c' in cache)) - self.assertTrue('b' in cache or ('a' in cache and 'c' in cache)) - self.assertTrue('c' in cache or ('a' in cache and 'b' in cache)) - - def test_getsizeof(self): - cache = RRCache(maxsize=3, getsizeof=lambda x: x) - - cache['a'] = 1 - cache['b'] = 2 - - self.assertEqual(len(cache), 2) - self.assertEqual(cache['a'], 1) - self.assertEqual(cache['b'], 2) - - cache['c'] = 3 - - self.assertEqual(len(cache), 1) - self.assertEqual(cache['c'], 3) - self.assertNotIn('a', cache) - self.assertNotIn('b', cache) - - with self.assertRaises(ValueError): - cache['d'] = 4 - self.assertEqual(len(cache), 1) - self.assertEqual(cache['c'], 3) + def make_cache(self, maxsize, getsizeof=None): + return RRCache(maxsize, getsizeof) def test_decorator(self): self.assertEqual(cached(1), 1) diff --git a/tests/test_ttlcache.py b/tests/test_ttlcache.py new file mode 100644 index 0000000..d1f946c --- /dev/null +++ b/tests/test_ttlcache.py @@ -0,0 +1,75 @@ +import unittest + +from . 
import CacheTestMixin +from cachetools import TTLCache + + +class TTLCacheTest(unittest.TestCase, CacheTestMixin): + + def make_cache(self, maxsize, getsizeof=None, ttl=86400): + return TTLCache(maxsize, ttl, getsizeof) + + def test_ttl_insert(self): + cache = self.make_cache(maxsize=2) + + cache[1] = 1 + cache[2] = 2 + #cache[1] = 1 + cache[3] = 3 + + self.assertEqual(len(cache), 2) + #self.assertEqual(cache[1], 1) + #self.assertTrue(2 in cache or 3 in cache) + #self.assertTrue(2 not in cache or 3 not in cache) + + cache[4] = 4 + self.assertEqual(len(cache), 2) + self.assertEqual(cache[4], 4) + #self.assertEqual(cache[1], 1) + + def test_ttl_getsizeof(self): + cache = self.make_cache(maxsize=3, getsizeof=lambda x: x) + + cache[1] = 1 + cache[2] = 2 + + self.assertEqual(len(cache), 2) + self.assertEqual(cache[1], 1) + self.assertEqual(cache[2], 2) + + cache[3] = 3 + + self.assertEqual(len(cache), 1) + self.assertEqual(cache[3], 3) + self.assertNotIn(1, cache) + self.assertNotIn(2, cache) + + with self.assertRaises(ValueError): + cache[4] = 4 + self.assertEqual(len(cache), 1) + self.assertEqual(cache[3], 3) + + def test_ttl_expire(self): + cache = self.make_cache(maxsize=2, ttl=0) + + cache[1] = 1 + cache[2] = 2 + cache[3] = 3 + + with self.assertRaises(KeyError): + cache[1] + with self.assertRaises(KeyError): + cache[2] + with self.assertRaises(KeyError): + cache[3] + +# +# self.assertEqual(len(cache), 2) +# self.assertEqual(cache[1], 1) +# self.assertTrue(2 in cache or 3 in cache) +# self.assertTrue(2 not in cache or 3 not in cache) +# +# cache[4] = 4 +# self.assertEqual(len(cache), 2) +# self.assertEqual(cache[4], 4) +# self.assertEqual(cache[1], 1) -- cgit v1.2.3 From b210c14c7fb336205625be8d29ece52c2fba6abe Mon Sep 17 00:00:00 2001 From: Thomas Kemmer Date: Mon, 16 Jun 2014 20:22:12 +0200 Subject: Prepare v0.4.0 --- Changes | 6 +++ README.rst | 39 ++++++-------- cachetools.py | 74 ++++++++++++++------------ docs/index.rst | 137 
+++++++++++++++++++++++++++---------------------- tests/test_ttlcache.py | 53 +------------------ 5 files changed, 141 insertions(+), 168 deletions(-) diff --git a/Changes b/Changes index 3aa2519..4c69fef 100644 --- a/Changes +++ b/Changes @@ -1,3 +1,9 @@ +0.4.0 2014-06-16 + +* Add TTLCache. +* Add Cache base class. +* Remove @cachedmethod lock parameter. + 0.3.1 2014-05-07 * Add proper locking for cache_clear(), cache_info(). diff --git a/README.rst b/README.rst index 4b01431..af046ed 100644 --- a/README.rst +++ b/README.rst @@ -9,35 +9,28 @@ including a variant of the Python 3 Standard Library >>> from cachetools import LRUCache >>> cache = LRUCache(maxsize=2) - >>> cache['first'] = 1 - >>> cache['second'] = 2 + >>> cache.update([('first', 1), ('second', 2)]) >>> cache - LRUCache(OrderedDict([('first', 1), ('second', 2)]), size=2, maxsize=2) + LRUCache([('second', 2), ('first', 1)], maxsize=2, currsize=2) >>> cache['third'] = 3 >>> cache - LRUCache(OrderedDict([('second', 2), ('third', 3)]), size=2, maxsize=2) + LRUCache([('second', 2), ('third', 3)], maxsize=2, currsize=2) >>> cache['second'] 2 - >>> cache - LRUCache(OrderedDict([('third', 3), ('second', 2)]), size=2, maxsize=2) >>> cache['fourth'] = 4 - >>> cache - LRUCache(OrderedDict([('second', 2), ('fourth', 4)]), size=2, maxsize=2) - -For the purpose of this module, a *cache* is a mutable_ mapping_ with -additional attributes ``size`` and ``maxsize``, which hold the current -and maximum size of the cache, and a (possibly static) method -``getsizeof``. - -The current size of the cache is the sum of the results of -``getsizeof`` applied to each of the cache's values, i.e. ``cache.size -== sum(map(cache.getsizeof, cache.values()), 0)``. As a special case, -if ``getsizeof`` returns ``1`` irrespective of its argument, -``cache.size == len(cache)``. - -When the cache is full, i.e. ``cache.size > cache.maxsize``, the cache -must choose which item(s) to discard based on a suitable `cache -algorithm`_. 
+ LRUCache([('second', 2), ('fourth', 4)], maxsize=2, currsize=2) + + +For the purpose of this module, a *cache* is a mutable_ mapping_ of a +fixed maximum *size*. When the cache is full, i.e. the current size +of the cache exceeds its maximum size, the cache must choose which +item(s) to discard based on a suitable `cache algorithm`_. + +In general, a cache's size is the sum of the size of its items. If +the size of each items is :const:`1`, a cache's size is equal to the +number of its items, i.e. :func:`len`. An items's size may also be a +property or function of its value, e.g. the result of +:func:`sys.getsizeof`, or :func:`len` for string and sequence values. This module provides various cache implementations based on different cache algorithms, as well as decorators for easily memoizing function diff --git a/cachetools.py b/cachetools.py index 24ef617..feec458 100644 --- a/cachetools.py +++ b/cachetools.py @@ -21,11 +21,14 @@ class _Link(object): class Cache(collections.MutableMapping): - """Mutable mapping to serve as a cache. + """Mutable mapping to serve as a simple cache or cache base class. This class discards arbitrary items using :meth:`popitem` to make space when necessary. Derived classes may override - :meth:`popitem` to implement specific caching strategies. + :meth:`popitem` to implement specific caching strategies. If a + subclass has to keep track of item access, insertion or deletion, + it may need override :meth:`__getitem__`, :meth:`__setitem__` and + :meth:`__delitem__`, too. """ @@ -82,7 +85,7 @@ class Cache(collections.MutableMapping): return self.__currsize @staticmethod - def getsizeof(object): + def getsizeof(value): """Return the size of a cache element.""" return 1 @@ -90,7 +93,7 @@ class Cache(collections.MutableMapping): class RRCache(Cache): """Random Replacement (RR) cache implementation. 
- This cache randomly selects candidate items and discards them to + This class randomly selects candidate items and discards them to make space when necessary. """ @@ -107,7 +110,7 @@ class RRCache(Cache): class LFUCache(Cache): """Least Frequently Used (LFU) cache implementation. - This cache counts how often an item is retrieved, and discards the + This class counts how often an item is retrieved, and discards the items used least often to make space when necessary. """ @@ -139,7 +142,7 @@ class LFUCache(Cache): class LRUCache(Cache): """Least Recently Used (LRU) cache implementation. - This cache discards the least recently used items first to make + This class discards the least recently used items first to make space when necessary. """ @@ -191,6 +194,14 @@ class LRUCache(Cache): del link.next del link.prev + def __repr__(self, cache_getitem=Cache.__getitem__): + return '%s(%r, maxsize=%d, currsize=%d)' % ( + self.__class__.__name__, + [(key, cache_getitem(self, key)[0]) for key in self], + self.maxsize, + self.currsize, + ) + def popitem(self): """Remove and return the `(key, value)` pair least recently used.""" root = self.__root @@ -202,11 +213,13 @@ class LRUCache(Cache): class TTLCache(LRUCache): - """LRU cache implementation with per-item time-to-live (TTL) value. + """Cache implementation with per-item time-to-live (TTL) value. - This least-recently-used cache associates a time-to-live value - with each item. Items that expire because they have exceeded - their time-to-live are removed from the cache automatically. + This class associates a time-to-live value with each item. Items + that expire because they have exceeded their time-to-live will be + removed automatically. If no expired items are there to remove, + the least recently used items will be discarded first to make + space when necessary. 
""" @@ -282,10 +295,9 @@ class TTLCache(LRUCache): try: value, link = LRUCache.__getitem__(self, key) except KeyError: - if default is _marker: - raise - else: + if default is not _marker: return default + raise LRUCache.__delitem__(self, key) link.prev.next = link.next link.next.prev = link.prev @@ -344,24 +356,6 @@ def _cachedfunc(cache, makekey, lock): return decorator -def _cachedmeth(getcache, makekey): - def decorator(func): - def wrapper(self, *args, **kwargs): - key = makekey((func,) + args, kwargs) - cache = getcache(self) - try: - return cache[key] - except KeyError: - pass - result = func(self, *args, **kwargs) - cache[key] = result - return result - - return functools.update_wrapper(wrapper, func) - - return decorator - - def lru_cache(maxsize=128, typed=False, getsizeof=None, lock=RLock): """Decorator to wrap a function with a memoizing callable that saves up to `maxsize` results based on a Least Recently Used (LRU) @@ -398,4 +392,20 @@ def cachedmethod(cache, typed=False): """ makekey = _makekey_typed if typed else _makekey - return _cachedmeth(cache, makekey) + + def decorator(method): + def wrapper(self, *args, **kwargs): + # TODO: `shared`, locking... 
+ key = makekey((method,) + args, kwargs) + mapping = cache(self) + try: + return mapping[key] + except KeyError: + pass + result = method(self, *args, **kwargs) + mapping[key] = result + return result + + return functools.update_wrapper(wrapper, method) + + return decorator diff --git a/docs/index.rst b/docs/index.rst index c0ad084..e9fd406 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -11,47 +11,54 @@ including a variant of the Python 3 Standard Library >>> from cachetools import LRUCache >>> cache = LRUCache(maxsize=2) - >>> cache['first'] = 1 - >>> cache['second'] = 2 + >>> cache.update([('first', 1), ('second', 2)]) >>> cache - LRUCache(maxsize=2, currsize=2, items=[('first', 1)]) + LRUCache([('second', 2), ('first', 1)], maxsize=2, currsize=2) >>> cache['third'] = 3 >>> cache - LRUCache(OrderedDict([('second', 2), ('third', 3)]), size=2, maxsize=2) + LRUCache([('second', 2), ('third', 3)], maxsize=2, currsize=2) >>> cache['second'] 2 - >>> cache - LRUCache(OrderedDict([('third', 3), ('second', 2)]), size=2, maxsize=2) >>> cache['fourth'] = 4 - >>> cache - LRUCache(OrderedDict([('second', 2), ('fourth', 4)]), size=2, maxsize=2) + LRUCache([('second', 2), ('fourth', 4)], maxsize=2, currsize=2) For the purpose of this module, a *cache* is a mutable_ mapping_ of a fixed maximum *size*. When the cache is full, i.e. the current size of the cache exceeds its maximum size, the cache must choose which item(s) to discard based on a suitable `cache algorithm`_. -In general, a cache's size is the sum of its element's sizes. For the -trivial case, if the size of each element is :const:`1`, a cache's -size is equal to the number of its entries, i.e. :func:`len`. An -element's size may also be a property or function of its value, -e.g. the result of :func:`sys.getsizeof`, or :func:`len` for string -and sequence elements. +In general, a cache's size is the sum of the size of its items. 
If +the size of each items is :const:`1`, a cache's size is equal to the +number of its items, i.e. :func:`len`. An items's size may also be a +property or function of its value, e.g. the result of +:func:`sys.getsizeof`, or :func:`len` for string and sequence values. This module provides various cache implementations based on different cache algorithms, as well as decorators for easily memoizing function and method calls. -Cache Base Class +Cache Implementations ------------------------------------------------------------------------ -.. autoclass:: Cache - :members: +This module provides several classes implementing caches using +different cache algorithms. All these classes derive from class +:class:`Cache`, which in turn derives from +:class:`collections.MutableMapping`, providing additional properties +:attr:`maxsize` and :attr:`currsize` to retrieve the maximum and +current size of the cache. +:class:`Cache` also features a static method :meth:`getsizeof`, which +returns the size of a given item and may be overridden by subclasses. +The default implementation of :meth:`getsizeof` returns :const:`1` +irrespective of its `value` argument. For convenience, all cache +classes also accept an optional constructor parameter `getsizeof`, +that may specify a function of one argument used to extract the size +of an item's value instead of the class' :meth:`getsizeof` method. -Cache Implementations ------------------------------------------------------------------------- + +.. autoclass:: Cache + :members: .. autoclass:: LRUCache :members: @@ -65,14 +72,25 @@ Cache Implementations .. autoclass:: TTLCache :members: - Note that a cache element may expire at *any* time, so the - following *may* raise an exception:: + Note that a cache item may expire at *any* time, so iterating over + the items of a :class:`TTLCache` may raise :class:`KeyError` or + :class:`RuntimeError` unexpectedly:: + + from cachetools import TTLCache + import time - cache = TTLCache(100, 1) - ... 
- for k in cache: - print(cache[k]) + cache = TTLCache(maxsize=100, ttl=1) + cache.update({1: 1, 2: 2, 3: 3}) + time.sleep(1) + try: + for key in cache: + try: + print(cache[key]) + except KeyError: + print('Key %r has expired' % key) + except RuntimeError as e: + print(e) Function Decorators @@ -86,18 +104,18 @@ In addition to a `maxsize` parameter, all decorators feature optional arguments, which should be specified as keyword arguments for compatibility with future extensions: -If `typed` is set to :const:`True`, function arguments of different -types will be cached separately. +- `typed`, if is set to :const:`True`, will cause function arguments + of different types to be cached separately. -`getsizeof` specifies a function of one argument that will be applied -to each cache value to determine its size. The default value is -:const:`None`, which will assign each element an equal size of -:const:`1`. +- `getsizeof` specifies a function of one argument that will be + applied to each cache value to determine its size. The default + value is :const:`None`, which will assign each item an equal size of + :const:`1`. -`lock` specifies a function of zero arguments that returns a `context -manager`_ to lock the cache when necessary. If not specified, -:class:`threading.RLock` will be used for synchronizing access from -multiple threads. +- `lock` specifies a function of zero arguments that returns a + `context manager`_ to lock the cache when necessary. If not + specified, :class:`threading.RLock` will be used to synchronize + access from multiple threads. The wrapped function is instrumented with :func:`cache_info` and :func:`cache_clear` functions to provide information about cache @@ -107,54 +125,51 @@ documentation for details. Unlike :func:`functools.lru_cache`, setting `maxsize` to zero or :const:`None` is not supported. -.. decorator:: lru_cache(maxsize=128, typed=False, getsizeof=None, lock=threading.RLock) +.. 
decorator:: lfu_cache(maxsize=128, typed=False, getsizeof=None, lock=threading.RLock) - Decorator to wrap a function with a memoizing callable that saves - up to `maxsize` results based on a Least Recently Used (LRU) - algorithm. + Decorator that wraps a function with a memoizing callable that + saves up to `maxsize` results based on a Least Frequently Used + (LFU) algorithm. -.. decorator:: lfu_cache(maxsize=128, typed=False, getsizeof=None, lock=threading.RLock) +.. decorator:: lru_cache(maxsize=128, typed=False, getsizeof=None, lock=threading.RLock) - Decorator to wrap a function with a memoizing callable that saves - up to `maxsize` results based on a Least Frequently Used (LFU) + Decorator that wraps a function with a memoizing callable that + saves up to `maxsize` results based on a Least Recently Used (LRU) algorithm. .. decorator:: rr_cache(maxsize=128, typed=False, getsizeof=None, lock=threading.RLock) - Decorator to wrap a function with a memoizing callable that saves - up to `maxsize` results based on a Random Replacement (RR) + Decorator that wraps a function with a memoizing callable that + saves up to `maxsize` results based on a Random Replacement (RR) algorithm. Method Decorators ------------------------------------------------------------------------ -.. decorator:: cachedmethod(getcache, typed=False, lock=threading.RLock) - - Decorator to wrap a class or instance method with a memoizing - callable that saves results in a (possibly shared) cache. +.. decorator:: cachedmethod(cache, typed=False) - `getcache` specifies a function of one argument that, when passed + `cache` specifies a function of one argument that, when passed :const:`self`, will return the cache object for the instance or class. See the `Function Decorators`_ section for details on the - other arguments. + `typed` argument. 
Python 3 example of a shared (class) LRU cache for static web content:: - class CachedPEPs(object): + class CachedPEPs(object): - cache = LRUCache(maxsize=32) + cache = LRUCache(maxsize=32) - @cachedmethod(operator.attrgetter('cache')) - def get_pep(self, num): - """Retrieve text of a Python Enhancement Proposal""" - resource = 'http://www.python.org/dev/peps/pep-%04d/' % num - try: - with urllib.request.urlopen(resource) as s: - return s.read() - except urllib.error.HTTPError: - return 'Not Found' + @cachedmethod(operator.attrgetter('cache')) + def get_pep(self, num): + """Retrieve text of a Python Enhancement Proposal""" + resource = 'http://www.python.org/dev/peps/pep-%04d/' % num + try: + with urllib.request.urlopen(resource) as s: + return s.read() + except urllib.error.HTTPError: + return 'Not Found' .. _mutable: http://docs.python.org/dev/glossary.html#term-mutable diff --git a/tests/test_ttlcache.py b/tests/test_ttlcache.py index d1f946c..5e89642 100644 --- a/tests/test_ttlcache.py +++ b/tests/test_ttlcache.py @@ -9,47 +9,7 @@ class TTLCacheTest(unittest.TestCase, CacheTestMixin): def make_cache(self, maxsize, getsizeof=None, ttl=86400): return TTLCache(maxsize, ttl, getsizeof) - def test_ttl_insert(self): - cache = self.make_cache(maxsize=2) - - cache[1] = 1 - cache[2] = 2 - #cache[1] = 1 - cache[3] = 3 - - self.assertEqual(len(cache), 2) - #self.assertEqual(cache[1], 1) - #self.assertTrue(2 in cache or 3 in cache) - #self.assertTrue(2 not in cache or 3 not in cache) - - cache[4] = 4 - self.assertEqual(len(cache), 2) - self.assertEqual(cache[4], 4) - #self.assertEqual(cache[1], 1) - - def test_ttl_getsizeof(self): - cache = self.make_cache(maxsize=3, getsizeof=lambda x: x) - - cache[1] = 1 - cache[2] = 2 - - self.assertEqual(len(cache), 2) - self.assertEqual(cache[1], 1) - self.assertEqual(cache[2], 2) - - cache[3] = 3 - - self.assertEqual(len(cache), 1) - self.assertEqual(cache[3], 3) - self.assertNotIn(1, cache) - self.assertNotIn(2, cache) - - with 
self.assertRaises(ValueError): - cache[4] = 4 - self.assertEqual(len(cache), 1) - self.assertEqual(cache[3], 3) - - def test_ttl_expire(self): + def test_ttl(self): cache = self.make_cache(maxsize=2, ttl=0) cache[1] = 1 @@ -62,14 +22,3 @@ class TTLCacheTest(unittest.TestCase, CacheTestMixin): cache[2] with self.assertRaises(KeyError): cache[3] - -# -# self.assertEqual(len(cache), 2) -# self.assertEqual(cache[1], 1) -# self.assertTrue(2 in cache or 3 in cache) -# self.assertTrue(2 not in cache or 3 not in cache) -# -# cache[4] = 4 -# self.assertEqual(len(cache), 2) -# self.assertEqual(cache[4], 4) -# self.assertEqual(cache[1], 1) -- cgit v1.2.3 From e9a8c86562341f1321215aa86d2f1db06e388bc5 Mon Sep 17 00:00:00 2001 From: Thomas Kemmer Date: Mon, 16 Jun 2014 20:42:08 +0200 Subject: Remove Sphinx markup --- README.rst | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/README.rst b/README.rst index af046ed..25d3381 100644 --- a/README.rst +++ b/README.rst @@ -27,10 +27,10 @@ of the cache exceeds its maximum size, the cache must choose which item(s) to discard based on a suitable `cache algorithm`_. In general, a cache's size is the sum of the size of its items. If -the size of each items is :const:`1`, a cache's size is equal to the -number of its items, i.e. :func:`len`. An items's size may also be a -property or function of its value, e.g. the result of -:func:`sys.getsizeof`, or :func:`len` for string and sequence values. +the size of each items is `1`, a cache's size is equal to the number +of its items, i.e. `len`. An items's size may also be a property or +function of its value, e.g. the result of `sys.getsizeof`, or `len` +for string and sequence values. 
This module provides various cache implementations based on different cache algorithms, as well as decorators for easily memoizing function -- cgit v1.2.3 From e78228cf488219044e71f98de630a0b07e35b293 Mon Sep 17 00:00:00 2001 From: Thomas Kemmer Date: Mon, 16 Jun 2014 20:43:26 +0200 Subject: Remove Sphinx markup --- README.rst | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/README.rst b/README.rst index 25d3381..85b3596 100644 --- a/README.rst +++ b/README.rst @@ -27,10 +27,10 @@ of the cache exceeds its maximum size, the cache must choose which item(s) to discard based on a suitable `cache algorithm`_. In general, a cache's size is the sum of the size of its items. If -the size of each items is `1`, a cache's size is equal to the number -of its items, i.e. `len`. An items's size may also be a property or -function of its value, e.g. the result of `sys.getsizeof`, or `len` -for string and sequence values. +the size of each items is ``1``, a cache's size is equal to the number +of its items, i.e. ``len()``. An items's size may also be a property +or function of its value, e.g. the result of ``sys.getsizeof()``, or +``len`` for string and sequence values. This module provides various cache implementations based on different cache algorithms, as well as decorators for easily memoizing function -- cgit v1.2.3 From 6a6e50bd1a51f2cd52ea94fa4f71dc8ba2a30572 Mon Sep 17 00:00:00 2001 From: Thomas Kemmer Date: Mon, 16 Jun 2014 20:43:49 +0200 Subject: Remove Sphinx markup --- README.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.rst b/README.rst index 85b3596..ef6358c 100644 --- a/README.rst +++ b/README.rst @@ -30,7 +30,7 @@ In general, a cache's size is the sum of the size of its items. If the size of each items is ``1``, a cache's size is equal to the number of its items, i.e. ``len()``. An items's size may also be a property or function of its value, e.g. 
the result of ``sys.getsizeof()``, or -``len`` for string and sequence values. +``len()`` for string and sequence values. This module provides various cache implementations based on different cache algorithms, as well as decorators for easily memoizing function -- cgit v1.2.3 From 8e1d7edf3cf0c86a9ed47992545b10c0aec4cd4e Mon Sep 17 00:00:00 2001 From: Thomas Kemmer Date: Mon, 16 Jun 2014 20:46:19 +0200 Subject: Remove Sphinx markup --- README.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.rst b/README.rst index ef6358c..ac71176 100644 --- a/README.rst +++ b/README.rst @@ -3,7 +3,7 @@ cachetools This module provides various memoizing collections and decorators, including a variant of the Python 3 Standard Library -`functools.lru_cache` function decorator. +``functools.lru_cache`` function decorator. .. code-block:: pycon -- cgit v1.2.3 From cbd852dca59743f8f21759249c90a77b583f8834 Mon Sep 17 00:00:00 2001 From: Thomas Kemmer Date: Tue, 17 Jun 2014 11:01:55 +0200 Subject: Update keywords, classifiers. 
--- setup.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/setup.py b/setup.py index 7277345..ebe35c9 100644 --- a/setup.py +++ b/setup.py @@ -16,7 +16,7 @@ setup( license='MIT', description='Extensible memoizing collections and decorators', # noqa long_description=open('README.rst').read(), - keywords='cache caching LRU LFU', + keywords='cache caching LRU LFU TTL', classifiers=[ 'Development Status :: 4 - Beta', 'Environment :: Other Environment', @@ -27,8 +27,8 @@ setup( 'Programming Language :: Python :: 2', 'Programming Language :: Python :: 2.7', 'Programming Language :: Python :: 3', + 'Programming Language :: Python :: 3.2', 'Programming Language :: Python :: 3.4', - 'Topic :: Internet', 'Topic :: Software Development :: Libraries :: Python Modules' ], py_modules=['cachetools'], -- cgit v1.2.3 From 4176a4f09384325278d8bb6fc8cd309302c893fa Mon Sep 17 00:00:00 2001 From: Thomas Kemmer Date: Tue, 17 Jun 2014 11:08:04 +0200 Subject: Fix typos/formatting in documentation --- README.rst | 8 ++++---- docs/index.rst | 8 ++++---- 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/README.rst b/README.rst index ac71176..4a7f5f0 100644 --- a/README.rst +++ b/README.rst @@ -27,10 +27,10 @@ of the cache exceeds its maximum size, the cache must choose which item(s) to discard based on a suitable `cache algorithm`_. In general, a cache's size is the sum of the size of its items. If -the size of each items is ``1``, a cache's size is equal to the number -of its items, i.e. ``len()``. An items's size may also be a property -or function of its value, e.g. the result of ``sys.getsizeof()``, or -``len()`` for string and sequence values. +the size of each items is 1, a cache's size is equal to the number of +its items, i.e. ``len(cache)``. An items's size may also be a +property or function of its value, e.g. the result of +``sys.getsizeof()``, or ``len()`` for string and sequence values. 
This module provides various cache implementations based on different cache algorithms, as well as decorators for easily memoizing function diff --git a/docs/index.rst b/docs/index.rst index e9fd406..77941b9 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -28,10 +28,10 @@ of the cache exceeds its maximum size, the cache must choose which item(s) to discard based on a suitable `cache algorithm`_. In general, a cache's size is the sum of the size of its items. If -the size of each items is :const:`1`, a cache's size is equal to the -number of its items, i.e. :func:`len`. An items's size may also be a -property or function of its value, e.g. the result of -:func:`sys.getsizeof`, or :func:`len` for string and sequence values. +the size of each item is 1, a cache's size is equal to the number of +its items, i.e. `len(cache)`. An items's size may also be a property +or function of its value, e.g. the result of :func:`sys.getsizeof`, or +:func:`len` for string and sequence values. This module provides various cache implementations based on different cache algorithms, as well as decorators for easily memoizing function -- cgit v1.2.3 From 78408a1c1a9018468941075c1b991161505bcbe1 Mon Sep 17 00:00:00 2001 From: Thomas Kemmer Date: Thu, 18 Sep 2014 05:32:16 +0200 Subject: Prepare v0.5.0 --- Changes | 43 ++++++++++++++++++++++++++++++++----------- README.rst | 2 +- cachetools.py | 2 +- setup.cfg | 6 +++--- 4 files changed, 37 insertions(+), 16 deletions(-) diff --git a/Changes b/Changes index 4c69fef..3c0fd69 100644 --- a/Changes +++ b/Changes @@ -1,25 +1,46 @@ +0.5.0 UNRELEASED +---------------- + +- Update Changelog, README. + + 0.4.0 2014-06-16 +---------------- + +- Add `TTLCache`. + +- Add `Cache` base class. + +- Remove `@cachedmethod` `lock` parameter. -* Add TTLCache. -* Add Cache base class. -* Remove @cachedmethod lock parameter. 0.3.1 2014-05-07 +---------------- + +- Add proper locking for `cache_clear()` and `cache_info()`. 
+ +- Report `size` in `cache_info()`. -* Add proper locking for cache_clear(), cache_info(). -* Report cache.size in cache_info(). 0.3.0 2014-05-06 +---------------- + +- Remove `@cache` decorator. + +- Add `size`, `getsizeof` members. + +- Add `@cachedmethod` decorator. -* Remove @cache decorator. -* Add size, getsizeof members. -* Add @cachedmethod decorator. 0.2.0 2014-04-02 +---------------- + +- Add `@cache` decorator. + +- Update documentation. -* Add @cache decorator. -* Update documentation. 0.1.0 2014-03-27 +---------------- -* Initial release. +- Initial release. diff --git a/README.rst b/README.rst index 4a7f5f0..02a366e 100644 --- a/README.rst +++ b/README.rst @@ -76,7 +76,7 @@ Licensed under the `MIT License`_. .. _cache algorithm: http://en.wikipedia.org/wiki/Cache_algorithms .. _Documentation: http://pythonhosted.org/cachetools/ -.. _Source Code: https://github.com/tkem/cachetools/ .. _Issue Tracker: https://github.com/tkem/cachetools/issues/ +.. _Source Code: https://github.com/tkem/cachetools/ .. _Change Log: http://raw.github.com/tkem/cachetools/master/Changes .. 
_MIT License: http://raw.github.com/tkem/cachetools/master/LICENSE diff --git a/cachetools.py b/cachetools.py index feec458..0f6a2ea 100644 --- a/cachetools.py +++ b/cachetools.py @@ -11,7 +11,7 @@ try: except ImportError: from dummy_threading import RLock -__version__ = '0.4.0' +__version__ = '0.5.0' _marker = object() diff --git a/setup.cfg b/setup.cfg index 53beacd..9ccea7a 100644 --- a/setup.cfg +++ b/setup.cfg @@ -1,3 +1,6 @@ +[flake8] +exclude = .git,docs + [build_sphinx] source-dir = docs/ build-dir = docs/_build @@ -5,6 +8,3 @@ all_files = 1 [upload_sphinx] upload-dir = docs/_build/html - -[flake8] -exclude = docs/* -- cgit v1.2.3 From a8d2b038f3331da782c73ed5755dc3f3d363d8fa Mon Sep 17 00:00:00 2001 From: Thomas Kemmer Date: Mon, 22 Sep 2014 17:59:04 +0200 Subject: Resolve #8: Rewrite @cachedmethod example to use per-object cache --- docs/index.rst | 51 +++++++++++++++++++++++++++++++-------------------- 1 file changed, 31 insertions(+), 20 deletions(-) diff --git a/docs/index.rst b/docs/index.rst index 77941b9..93dc71c 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -150,26 +150,37 @@ Method Decorators .. decorator:: cachedmethod(cache, typed=False) `cache` specifies a function of one argument that, when passed - :const:`self`, will return the cache object for the instance or - class. See the `Function Decorators`_ section for details on the - `typed` argument. - - Python 3 example of a shared (class) LRU cache for static web - content:: - - class CachedPEPs(object): - - cache = LRUCache(maxsize=32) - - @cachedmethod(operator.attrgetter('cache')) - def get_pep(self, num): - """Retrieve text of a Python Enhancement Proposal""" - resource = 'http://www.python.org/dev/peps/pep-%04d/' % num - try: - with urllib.request.urlopen(resource) as s: - return s.read() - except urllib.error.HTTPError: - return 'Not Found' + :const:`self`, will return a *cache* for the respective instance or + class. 
Multiple methods of an object or class may share the same + cache. + + One advantage of the `@cachedmethod` decorator over the similar + function decorators is that cache properties such as `maxsize` can + be set at runtime:: + + import operator + import urllib.request + + from cachetools import LRUCache, cachedmethod + + class CachedPEPs(object): + + def __init__(self, cachesize): + self.cache = LRUCache(maxsize=cachesize) + + @cachedmethod(operator.attrgetter('cache')) + def get_pep(self, num): + """Retrieve text of a Python Enhancement Proposal""" + url = 'http://www.python.org/dev/peps/pep-%04d/' % num + with urllib.request.urlopen(url) as s: + return s.read() + + peps = CachedPEPs(cachesize=10) + print("PEP #1: %s" % peps.get_pep(1)) + + Note that no locking will be performed on the object returned by + `cache(self)`, so dealing with concurrent access is entirely the + responsibility of the user. .. _mutable: http://docs.python.org/dev/glossary.html#term-mutable -- cgit v1.2.3 From e255ae776117772c35dfa54db3acd2410658ba8b Mon Sep 17 00:00:00 2001 From: Thomas Kemmer Date: Thu, 18 Sep 2014 06:27:08 +0200 Subject: Fix #6: Non-mutating TTLCache.__getitem__() --- Changes | 4 +- cachetools.py | 411 ----------------------------------------------- cachetools/__init__.py | 12 ++ cachetools/cache.py | 71 ++++++++ cachetools/decorators.py | 115 +++++++++++++ cachetools/lfucache.py | 35 ++++ cachetools/link.py | 10 ++ cachetools/lrucache.py | 72 +++++++++ cachetools/rrcache.py | 20 +++ cachetools/ttlcache.py | 106 ++++++++++++ docs/index.rst | 23 ++- setup.py | 4 +- tests/__init__.py | 50 ++++++ tests/test_lrucache.py | 51 +----- tests/test_ttlcache.py | 73 ++++++++- 15 files changed, 573 insertions(+), 484 deletions(-) delete mode 100644 cachetools.py create mode 100644 cachetools/__init__.py create mode 100644 cachetools/cache.py create mode 100644 cachetools/decorators.py create mode 100644 cachetools/lfucache.py create mode 100644 cachetools/link.py create mode 
100644 cachetools/lrucache.py create mode 100644 cachetools/rrcache.py create mode 100644 cachetools/ttlcache.py diff --git a/Changes b/Changes index 3c0fd69..cd7d8dc 100644 --- a/Changes +++ b/Changes @@ -1,7 +1,9 @@ 0.5.0 UNRELEASED ---------------- -- Update Changelog, README. +- Update Changelog, README, documentation. + +- Do not delete expired items in TTLCache.__getitem__(). 0.4.0 2014-06-16 diff --git a/cachetools.py b/cachetools.py deleted file mode 100644 index 0f6a2ea..0000000 --- a/cachetools.py +++ /dev/null @@ -1,411 +0,0 @@ -"""Extensible memoizing collections and decorators""" - -import collections -import functools -import operator -import random -import time - -try: - from threading import RLock -except ImportError: - from dummy_threading import RLock - -__version__ = '0.5.0' - -_marker = object() - - -class _Link(object): - __slots__ = 'prev', 'next', 'data' - - -class Cache(collections.MutableMapping): - """Mutable mapping to serve as a simple cache or cache base class. - - This class discards arbitrary items using :meth:`popitem` to make - space when necessary. Derived classes may override - :meth:`popitem` to implement specific caching strategies. If a - subclass has to keep track of item access, insertion or deletion, - it may need override :meth:`__getitem__`, :meth:`__setitem__` and - :meth:`__delitem__`, too. 
- - """ - - def __init__(self, maxsize, getsizeof=None): - if getsizeof is not None: - self.getsizeof = getsizeof - self.__mapping = dict() - self.__maxsize = maxsize - self.__currsize = 0 - - def __getitem__(self, key): - return self.__mapping[key][0] - - def __setitem__(self, key, value): - mapping = self.__mapping - maxsize = self.__maxsize - size = self.getsizeof(value) - if size > maxsize: - raise ValueError('value too large') - if key not in mapping or mapping[key][1] < size: - while self.__currsize + size > maxsize: - self.popitem() - if key in mapping: - self.__currsize -= mapping[key][1] - mapping[key] = (value, size) - self.__currsize += size - - def __delitem__(self, key): - _, size = self.__mapping.pop(key) - self.__currsize -= size - - def __iter__(self): - return iter(self.__mapping) - - def __len__(self): - return len(self.__mapping) - - def __repr__(self): - return '%s(%r, maxsize=%d, currsize=%d)' % ( - self.__class__.__name__, - list(self.items()), - self.__maxsize, - self.__currsize, - ) - - @property - def maxsize(self): - """Return the maximum size of the cache.""" - return self.__maxsize - - @property - def currsize(self): - """Return the current size of the cache.""" - return self.__currsize - - @staticmethod - def getsizeof(value): - """Return the size of a cache element.""" - return 1 - - -class RRCache(Cache): - """Random Replacement (RR) cache implementation. - - This class randomly selects candidate items and discards them to - make space when necessary. - - """ - - def popitem(self): - """Remove and return a random `(key, value)` pair.""" - try: - key = random.choice(list(self)) - except IndexError: - raise KeyError('cache is empty') - return (key, self.pop(key)) - - -class LFUCache(Cache): - """Least Frequently Used (LFU) cache implementation. - - This class counts how often an item is retrieved, and discards the - items used least often to make space when necessary. 
- - """ - - def __init__(self, maxsize, getsizeof=None): - if getsizeof is not None: - Cache.__init__(self, maxsize, lambda e: getsizeof(e[0])) - else: - Cache.__init__(self, maxsize) - - def __getitem__(self, key, cache_getitem=Cache.__getitem__): - entry = cache_getitem(self, key) - entry[1] += 1 - return entry[0] - - def __setitem__(self, key, value, cache_setitem=Cache.__setitem__): - cache_setitem(self, key, [value, 0]) - - def popitem(self): - """Remove and return the `(key, value)` pair least frequently used.""" - items = ((key, Cache.__getitem__(self, key)[1]) for key in self) - try: - key, _ = min(items, key=operator.itemgetter(1)) - except ValueError: - raise KeyError('cache is empty') - return (key, self.pop(key)) - - -class LRUCache(Cache): - """Least Recently Used (LRU) cache implementation. - - This class discards the least recently used items first to make - space when necessary. - - """ - - def __init__(self, maxsize, getsizeof=None): - if getsizeof is not None: - Cache.__init__(self, maxsize, lambda e: getsizeof(e[0])) - else: - Cache.__init__(self, maxsize) - root = _Link() - root.prev = root.next = root - self.__root = root - - def __getitem__(self, key, cache_getitem=Cache.__getitem__): - value, link = cache_getitem(self, key) - root = self.__root - link.prev.next = link.next - link.next.prev = link.prev - link.prev = tail = root.prev - link.next = root - tail.next = root.prev = link - return value - - def __setitem__(self, key, value, - cache_getitem=Cache.__getitem__, - cache_setitem=Cache.__setitem__): - try: - _, link = cache_getitem(self, key) - except KeyError: - link = _Link() - cache_setitem(self, key, (value, link)) - try: - link.prev.next = link.next - link.next.prev = link.prev - except AttributeError: - link.data = key - root = self.__root - link.prev = tail = root.prev - link.next = root - tail.next = root.prev = link - - def __delitem__(self, key, - cache_getitem=Cache.__getitem__, - cache_delitem=Cache.__delitem__): - _, link = 
cache_getitem(self, key) - cache_delitem(self, key) - link.prev.next = link.next - link.next.prev = link.prev - del link.next - del link.prev - - def __repr__(self, cache_getitem=Cache.__getitem__): - return '%s(%r, maxsize=%d, currsize=%d)' % ( - self.__class__.__name__, - [(key, cache_getitem(self, key)[0]) for key in self], - self.maxsize, - self.currsize, - ) - - def popitem(self): - """Remove and return the `(key, value)` pair least recently used.""" - root = self.__root - link = root.next - if link is root: - raise KeyError('cache is empty') - key = link.data - return (key, self.pop(key)) - - -class TTLCache(LRUCache): - """Cache implementation with per-item time-to-live (TTL) value. - - This class associates a time-to-live value with each item. Items - that expire because they have exceeded their time-to-live will be - removed automatically. If no expired items are there to remove, - the least recently used items will be discarded first to make - space when necessary. - - """ - - def __init__(self, maxsize, ttl, getsizeof=None, timer=time.time): - if getsizeof is not None: - LRUCache.__init__(self, maxsize, lambda e: getsizeof(e[0])) - else: - LRUCache.__init__(self, maxsize) - root = _Link() - root.prev = root.next = root - self.__root = root - self.__timer = timer - self.__ttl = ttl - - def __getitem__(self, key, - cache_getitem=LRUCache.__getitem__, - cache_delitem=LRUCache.__delitem__): - value, link = cache_getitem(self, key) - if self.__timer() < link.data[1]: - return value - root = self.__root - head = root.next - link = link.next - while head is not link: - cache_delitem(self, head.data[0]) - head.next.prev = root - head = root.next = head.next - raise KeyError('%r has expired' % key) - - def __setitem__(self, key, value, - cache_getitem=LRUCache.__getitem__, - cache_setitem=LRUCache.__setitem__, - cache_delitem=LRUCache.__delitem__): - root = self.__root - head = root.next - time = self.__timer() - while head is not root and head.data[1] < time: - 
cache_delitem(self, head.data[0]) - head.next.prev = root - head = root.next = head.next - try: - _, link = cache_getitem(self, key) - except KeyError: - link = _Link() - cache_setitem(self, key, (value, link)) - try: - link.prev.next = link.next - link.next.prev = link.prev - except AttributeError: - pass - link.data = (key, time + self.__ttl) - link.prev = tail = root.prev - link.next = root - tail.next = root.prev = link - - def __delitem__(self, key, - cache_getitem=LRUCache.__getitem__, - cache_delitem=LRUCache.__delitem__): - _, link = cache_getitem(self, key) - cache_delitem(self, key) - link.prev.next = link.next - link.next.prev = link.prev - - def __repr__(self, cache_getitem=LRUCache.__getitem__): - return '%s(%r, maxsize=%d, currsize=%d)' % ( - self.__class__.__name__, - [(key, cache_getitem(self, key)[0]) for key in self], - self.maxsize, - self.currsize, - ) - - def pop(self, key, default=_marker): - try: - value, link = LRUCache.__getitem__(self, key) - except KeyError: - if default is not _marker: - return default - raise - LRUCache.__delitem__(self, key) - link.prev.next = link.next - link.next.prev = link.prev - del link.next - del link.prev - return value - - -CacheInfo = collections.namedtuple('CacheInfo', 'hits misses maxsize currsize') - - -def _makekey(args, kwargs): - return (args, tuple(sorted(kwargs.items()))) - - -def _makekey_typed(args, kwargs): - key = _makekey(args, kwargs) - key += tuple(type(v) for v in args) - key += tuple(type(v) for k, v in sorted(kwargs.items())) - return key - - -def _cachedfunc(cache, makekey, lock): - def decorator(func): - stats = [0, 0] - - def wrapper(*args, **kwargs): - key = makekey(args, kwargs) - with lock: - try: - result = cache[key] - stats[0] += 1 - return result - except KeyError: - stats[1] += 1 - result = func(*args, **kwargs) - with lock: - cache[key] = result - return result - - def cache_info(): - with lock: - hits, misses = stats - maxsize = cache.maxsize - currsize = cache.currsize - return 
CacheInfo(hits, misses, maxsize, currsize) - - def cache_clear(): - with lock: - cache.clear() - - wrapper.cache_info = cache_info - wrapper.cache_clear = cache_clear - return functools.update_wrapper(wrapper, func) - - return decorator - - -def lru_cache(maxsize=128, typed=False, getsizeof=None, lock=RLock): - """Decorator to wrap a function with a memoizing callable that saves - up to `maxsize` results based on a Least Recently Used (LRU) - algorithm. - - """ - makekey = _makekey_typed if typed else _makekey - return _cachedfunc(LRUCache(maxsize, getsizeof), makekey, lock()) - - -def lfu_cache(maxsize=128, typed=False, getsizeof=None, lock=RLock): - """Decorator to wrap a function with a memoizing callable that saves - up to `maxsize` results based on a Least Frequently Used (LFU) - algorithm. - - """ - makekey = _makekey_typed if typed else _makekey - return _cachedfunc(LFUCache(maxsize, getsizeof), makekey, lock()) - - -def rr_cache(maxsize=128, typed=False, getsizeof=None, lock=RLock): - """Decorator to wrap a function with a memoizing callable that saves - up to `maxsize` results based on a Random Replacement (RR) - algorithm. - - """ - makekey = _makekey_typed if typed else _makekey - return _cachedfunc(RRCache(maxsize, getsizeof), makekey, lock()) - - -def cachedmethod(cache, typed=False): - """Decorator to wrap a class or instance method with a memoizing - callable that saves results in a (possibly shared) cache. - - """ - makekey = _makekey_typed if typed else _makekey - - def decorator(method): - def wrapper(self, *args, **kwargs): - # TODO: `shared`, locking... 
- key = makekey((method,) + args, kwargs) - mapping = cache(self) - try: - return mapping[key] - except KeyError: - pass - result = method(self, *args, **kwargs) - mapping[key] = result - return result - - return functools.update_wrapper(wrapper, method) - - return decorator diff --git a/cachetools/__init__.py b/cachetools/__init__.py new file mode 100644 index 0000000..c999d9d --- /dev/null +++ b/cachetools/__init__.py @@ -0,0 +1,12 @@ +"""Extensible memoizing collections and decorators""" + +# flake8: noqa + +from .cache import Cache +from .rrcache import RRCache +from .lfucache import LFUCache +from .lrucache import LRUCache +from .ttlcache import TTLCache +from .decorators import rr_cache, lfu_cache, lru_cache, cachedmethod + +__version__ = '0.5.0' diff --git a/cachetools/cache.py b/cachetools/cache.py new file mode 100644 index 0000000..d5176eb --- /dev/null +++ b/cachetools/cache.py @@ -0,0 +1,71 @@ +import collections + + +class Cache(collections.MutableMapping): + """Mutable mapping to serve as a simple cache or cache base class. + + This class discards arbitrary items using :meth:`popitem` to make + space when necessary. Derived classes may override + :meth:`popitem` to implement specific caching strategies. If a + subclass has to keep track of item access, insertion or deletion, + it may need override :meth:`__getitem__`, :meth:`__setitem__` and + :meth:`__delitem__`, too. 
+ + """ + + def __init__(self, maxsize, getsizeof=None): + if getsizeof is not None: + self.getsizeof = getsizeof + self.__mapping = dict() + self.__maxsize = maxsize + self.__currsize = 0 + + def __repr__(self): + return '%s(%r, maxsize=%d, currsize=%d)' % ( + self.__class__.__name__, + list(self.items()), + self.__maxsize, + self.__currsize, + ) + + def __getitem__(self, key): + return self.__mapping[key][0] + + def __setitem__(self, key, value): + mapping = self.__mapping + maxsize = self.__maxsize + size = self.getsizeof(value) + if size > maxsize: + raise ValueError('value too large') + if key not in mapping or mapping[key][1] < size: + while self.__currsize + size > maxsize: + self.popitem() + if key in mapping: + self.__currsize -= mapping[key][1] + mapping[key] = (value, size) + self.__currsize += size + + def __delitem__(self, key): + _, size = self.__mapping.pop(key) + self.__currsize -= size + + def __iter__(self): + return iter(self.__mapping) + + def __len__(self): + return len(self.__mapping) + + @property + def maxsize(self): + """Return the maximum size of the cache.""" + return self.__maxsize + + @property + def currsize(self): + """Return the current size of the cache.""" + return self.__currsize + + @staticmethod + def getsizeof(value): + """Return the size of a cache element.""" + return 1 diff --git a/cachetools/decorators.py b/cachetools/decorators.py new file mode 100644 index 0000000..9104013 --- /dev/null +++ b/cachetools/decorators.py @@ -0,0 +1,115 @@ +from .rrcache import RRCache +from .lfucache import LFUCache +from .lrucache import LRUCache + +import collections +import functools + +try: + from threading import RLock +except ImportError: + from dummy_threading import RLock + +CacheInfo = collections.namedtuple('CacheInfo', 'hits misses maxsize currsize') + + +def _makekey(args, kwargs): + return (args, tuple(sorted(kwargs.items()))) + + +def _makekey_typed(args, kwargs): + key = _makekey(args, kwargs) + key += tuple(type(v) for v in 
args) + key += tuple(type(v) for k, v in sorted(kwargs.items())) + return key + + +def _cachedfunc(cache, makekey, lock): + def decorator(func): + stats = [0, 0] + + def wrapper(*args, **kwargs): + key = makekey(args, kwargs) + with lock: + try: + result = cache[key] + stats[0] += 1 + return result + except KeyError: + stats[1] += 1 + result = func(*args, **kwargs) + with lock: + cache[key] = result + return result + + def cache_info(): + with lock: + hits, misses = stats + maxsize = cache.maxsize + currsize = cache.currsize + return CacheInfo(hits, misses, maxsize, currsize) + + def cache_clear(): + with lock: + cache.clear() + + wrapper.cache_info = cache_info + wrapper.cache_clear = cache_clear + return functools.update_wrapper(wrapper, func) + + return decorator + + +def rr_cache(maxsize=128, typed=False, getsizeof=None, lock=RLock): + """Decorator to wrap a function with a memoizing callable that saves + up to `maxsize` results based on a Random Replacement (RR) + algorithm. + + """ + makekey = _makekey_typed if typed else _makekey + return _cachedfunc(RRCache(maxsize, getsizeof), makekey, lock()) + + +def lfu_cache(maxsize=128, typed=False, getsizeof=None, lock=RLock): + """Decorator to wrap a function with a memoizing callable that saves + up to `maxsize` results based on a Least Frequently Used (LFU) + algorithm. + + """ + makekey = _makekey_typed if typed else _makekey + return _cachedfunc(LFUCache(maxsize, getsizeof), makekey, lock()) + + +def lru_cache(maxsize=128, typed=False, getsizeof=None, lock=RLock): + """Decorator to wrap a function with a memoizing callable that saves + up to `maxsize` results based on a Least Recently Used (LRU) + algorithm. + + """ + makekey = _makekey_typed if typed else _makekey + return _cachedfunc(LRUCache(maxsize, getsizeof), makekey, lock()) + + +def cachedmethod(cache, typed=False): + """Decorator to wrap a class or instance method with a memoizing + callable that saves results in a (possibly shared) cache. 
+ + """ + makekey = _makekey_typed if typed else _makekey + + def decorator(method): + def wrapper(self, *args, **kwargs): + # TODO: `shared`, locking... + key = makekey((method,) + args, kwargs) + mapping = cache(self) + try: + return mapping[key] + except KeyError: + pass + result = method(self, *args, **kwargs) + mapping[key] = result + return result + + return functools.update_wrapper(wrapper, method) + + return decorator diff --git a/cachetools/lfucache.py b/cachetools/lfucache.py new file mode 100644 index 0000000..9ea66d1 --- /dev/null +++ b/cachetools/lfucache.py @@ -0,0 +1,35 @@ +from .cache import Cache + +import operator + + +class LFUCache(Cache): + """Least Frequently Used (LFU) cache implementation. + + This class counts how often an item is retrieved, and discards the + items used least often to make space when necessary. + + """ + + def __init__(self, maxsize, getsizeof=None): + if getsizeof is not None: + Cache.__init__(self, maxsize, lambda e: getsizeof(e[0])) + else: + Cache.__init__(self, maxsize) + + def __getitem__(self, key, cache_getitem=Cache.__getitem__): + entry = cache_getitem(self, key) + entry[1] += 1 + return entry[0] + + def __setitem__(self, key, value, cache_setitem=Cache.__setitem__): + cache_setitem(self, key, [value, 0]) + + def popitem(self): + """Remove and return the `(key, value)` pair least frequently used.""" + items = ((key, Cache.__getitem__(self, key)[1]) for key in self) + try: + key, _ = min(items, key=operator.itemgetter(1)) + except ValueError: + raise KeyError('cache is empty') + return (key, self.pop(key)) diff --git a/cachetools/link.py b/cachetools/link.py new file mode 100644 index 0000000..122735c --- /dev/null +++ b/cachetools/link.py @@ -0,0 +1,10 @@ +class Link(object): + __slots__ = 'prev', 'next', 'data' + + def unlink(self): + next = self.next + prev = self.prev + prev.next = next + next.prev = prev + del self.next + del self.prev diff --git a/cachetools/lrucache.py b/cachetools/lrucache.py new file mode 
100644 index 0000000..e815545 --- /dev/null +++ b/cachetools/lrucache.py @@ -0,0 +1,72 @@ +from .cache import Cache +from .link import Link + + +class LRUCache(Cache): + """Least Recently Used (LRU) cache implementation. + + This class discards the least recently used items first to make + space when necessary. + + """ + + def __init__(self, maxsize, getsizeof=None): + if getsizeof is not None: + Cache.__init__(self, maxsize, lambda e: getsizeof(e[0])) + else: + Cache.__init__(self, maxsize) + root = Link() + root.prev = root.next = root + self.__root = root + + def __repr__(self, cache_getitem=Cache.__getitem__): + return '%s(%r, maxsize=%d, currsize=%d)' % ( + self.__class__.__name__, + [(key, cache_getitem(self, key)[0]) for key in self], + self.maxsize, + self.currsize, + ) + + def __getitem__(self, key, cache_getitem=Cache.__getitem__): + value, link = cache_getitem(self, key) + root = self.__root + link.prev.next = link.next + link.next.prev = link.prev + link.prev = tail = root.prev + link.next = root + tail.next = root.prev = link + return value + + def __setitem__(self, key, value, + cache_getitem=Cache.__getitem__, + cache_setitem=Cache.__setitem__): + try: + _, link = cache_getitem(self, key) + except KeyError: + link = Link() + cache_setitem(self, key, (value, link)) + try: + link.prev.next = link.next + link.next.prev = link.prev + except AttributeError: + link.data = key + root = self.__root + link.prev = tail = root.prev + link.next = root + tail.next = root.prev = link + + def __delitem__(self, key, + cache_getitem=Cache.__getitem__, + cache_delitem=Cache.__delitem__): + _, link = cache_getitem(self, key) + cache_delitem(self, key) + link.unlink() + + def popitem(self): + """Remove and return the `(key, value)` pair least recently used.""" + root = self.__root + link = root.next + if link is root: + raise KeyError('cache is empty') + key = link.data + return (key, self.pop(key)) diff --git a/cachetools/rrcache.py b/cachetools/rrcache.py new file 
mode 100644 index 0000000..3042a4e --- /dev/null +++ b/cachetools/rrcache.py @@ -0,0 +1,20 @@ +from .cache import Cache + +import random + + +class RRCache(Cache): + """Random Replacement (RR) cache implementation. + + This class randomly selects candidate items and discards them to + make space when necessary. + + """ + + def popitem(self): + """Remove and return a random `(key, value)` pair.""" + try: + key = random.choice(list(self)) + except IndexError: + raise KeyError('cache is empty') + return (key, self.pop(key)) diff --git a/cachetools/ttlcache.py b/cachetools/ttlcache.py new file mode 100644 index 0000000..bb38f2b --- /dev/null +++ b/cachetools/ttlcache.py @@ -0,0 +1,106 @@ +from .lrucache import LRUCache +from .link import Link + +import time + +_marker = object() + + +class TTLCache(LRUCache): + """Cache implementation with per-item time-to-live (TTL) value. + + This class associates a time-to-live value with each item. Items + that expire because they have exceeded their time-to-live will be + removed automatically. If no expired items are there to remove, + the least recently used items will be discarded first to make + space when necessary. 
+ + """ + + def __init__(self, maxsize, ttl, timer=time.time, getsizeof=None): + if getsizeof is not None: + LRUCache.__init__(self, maxsize, lambda e: getsizeof(e[0])) + else: + LRUCache.__init__(self, maxsize) + root = Link() + root.prev = root.next = root + self.__root = root + self.__timer = timer + self.__ttl = ttl + + def __repr__(self, cache_getitem=LRUCache.__getitem__): + return '%s(%r, maxsize=%d, currsize=%d)' % ( + self.__class__.__name__, + [(key, cache_getitem(self, key)[0]) for key in self], + self.maxsize, + self.currsize, + ) + + def __getitem__(self, key, cache_getitem=LRUCache.__getitem__): + value, link = cache_getitem(self, key) + if link.data[1] < self.__timer(): + raise KeyError('%r has expired' % key) + return value + + def __setitem__(self, key, value, + cache_getitem=LRUCache.__getitem__, + cache_setitem=LRUCache.__setitem__, + cache_delitem=LRUCache.__delitem__): + time = self.__timer() + self.expire(time) + try: + _, link = cache_getitem(self, key) + except KeyError: + link = Link() + cache_setitem(self, key, (value, link)) + try: + link.prev.next = link.next + link.next.prev = link.prev + except AttributeError: + pass + root = self.__root + link.data = (key, time + self.__ttl) + link.prev = tail = root.prev + link.next = root + tail.next = root.prev = link + + def __delitem__(self, key, + cache_getitem=LRUCache.__getitem__, + cache_delitem=LRUCache.__delitem__): + _, link = cache_getitem(self, key) + cache_delitem(self, key) + link.unlink() + self.expire() + + @property + def ttl(self): + """Return the time-to-live of the cache.""" + return self.__ttl + + @property + def timer(self): + """Return the timer used by the cache.""" + return self.__timer + + def pop(self, key, default=_marker): + try: + value, link = LRUCache.__getitem__(self, key) + except KeyError: + if default is _marker: + raise + return default + LRUCache.__delitem__(self, key) + link.unlink() + self.expire() + return value + + def expire(self, time=None, 
cache_delitem=LRUCache.__delitem__): + """TODO""" + if time is None: + time = self.__timer() + root = self.__root + head = root.next + while head is not root and head.data[1] < time: + cache_delitem(self, head.data[0]) + head.next.prev = root + head = root.next = head.next diff --git a/docs/index.rst b/docs/index.rst index 93dc71c..66e27fb 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -52,9 +52,9 @@ current size of the cache. returns the size of a given item and may be overridden by subclasses. The default implementation of :meth:`getsizeof` returns :const:`1` irrespective of its `value` argument. For convenience, all cache -classes also accept an optional constructor parameter `getsizeof`, -that may specify a function of one argument used to extract the size -of an item's value instead of the class' :meth:`getsizeof` method. +classes also accept an optional named constructor parameter +`getsizeof`, that may specify a function of one argument used to +retrieve the size of an item's value. .. autoclass:: Cache @@ -73,8 +73,8 @@ of an item's value instead of the class' :meth:`getsizeof` method. :members: Note that a cache item may expire at *any* time, so iterating over - the items of a :class:`TTLCache` may raise :class:`KeyError` or - :class:`RuntimeError` unexpectedly:: + the items of a :class:`TTLCache` may raise :class:`KeyError` + unexpectedly:: from cachetools import TTLCache import time @@ -83,14 +83,11 @@ of an item's value instead of the class' :meth:`getsizeof` method. 
cache.update({1: 1, 2: 2, 3: 3}) time.sleep(1) - try: - for key in cache: - try: - print(cache[key]) - except KeyError: - print('Key %r has expired' % key) - except RuntimeError as e: - print(e) + for key in cache: + try: + print(cache[key]) + except KeyError: + print('Key %r has expired' % key) Function Decorators diff --git a/setup.py b/setup.py index ebe35c9..ef6ba8e 100644 --- a/setup.py +++ b/setup.py @@ -9,7 +9,7 @@ def get_version(filename): setup( name='cachetools', - version=get_version('cachetools.py'), + version=get_version('cachetools/__init__.py'), author='Thomas Kemmer', author_email='tkemmer@computer.org', url='https://github.com/tkem/cachetools', @@ -31,6 +31,6 @@ setup( 'Programming Language :: Python :: 3.4', 'Topic :: Software Development :: Libraries :: Python Modules' ], - py_modules=['cachetools'], + packages=['cachetools'], test_suite='tests' ) diff --git a/tests/__init__.py b/tests/__init__.py index 4ab4a11..7048bfd 100644 --- a/tests/__init__.py +++ b/tests/__init__.py @@ -124,3 +124,53 @@ class CacheTestMixin(object): self.assertEqual(1, len(cache)) self.assertEqual(3, cache.currsize) self.assertEqual(3, cache[3]) + + +class LRUCacheTestMixin(CacheTestMixin): + + def test_lru_insert(self): + cache = self.make_cache(maxsize=2) + + cache[1] = 1 + cache[2] = 2 + cache[3] = 3 + + self.assertEqual(len(cache), 2) + self.assertEqual(cache[2], 2) + self.assertEqual(cache[3], 3) + self.assertNotIn(1, cache) + + cache[2] + cache[4] = 4 + self.assertEqual(len(cache), 2) + self.assertEqual(cache[2], 2) + self.assertEqual(cache[4], 4) + self.assertNotIn(3, cache) + + cache[5] = 5 + self.assertEqual(len(cache), 2) + self.assertEqual(cache[4], 4) + self.assertEqual(cache[5], 5) + self.assertNotIn(2, cache) + + def test_lru_getsizeof(self): + cache = self.make_cache(maxsize=3, getsizeof=lambda x: x) + + cache[1] = 1 + cache[2] = 2 + + self.assertEqual(len(cache), 2) + self.assertEqual(cache[1], 1) + self.assertEqual(cache[2], 2) + + cache[3] = 3 + + 
self.assertEqual(len(cache), 1) + self.assertEqual(cache[3], 3) + self.assertNotIn(1, cache) + self.assertNotIn(2, cache) + + with self.assertRaises(ValueError): + cache[4] = 4 + self.assertEqual(len(cache), 1) + self.assertEqual(cache[3], 3) diff --git a/tests/test_lrucache.py b/tests/test_lrucache.py index 242a0e7..bdbb39d 100644 --- a/tests/test_lrucache.py +++ b/tests/test_lrucache.py @@ -1,6 +1,6 @@ import unittest -from . import CacheTestMixin +from . import LRUCacheTestMixin from cachetools import LRUCache, lru_cache @@ -14,58 +14,11 @@ def cached_typed(n): return n -class LRUCacheTest(unittest.TestCase, CacheTestMixin): +class LRUCacheTest(unittest.TestCase, LRUCacheTestMixin): def make_cache(self, maxsize, getsizeof=None): return LRUCache(maxsize, getsizeof) - def test_lru_insert(self): - cache = self.make_cache(maxsize=2) - - cache[1] = 1 - cache[2] = 2 - cache[3] = 3 - - self.assertEqual(len(cache), 2) - self.assertEqual(cache[2], 2) - self.assertEqual(cache[3], 3) - self.assertNotIn(1, cache) - - cache[2] - cache[4] = 4 - self.assertEqual(len(cache), 2) - self.assertEqual(cache[2], 2) - self.assertEqual(cache[4], 4) - self.assertNotIn(3, cache) - - cache[5] = 5 - self.assertEqual(len(cache), 2) - self.assertEqual(cache[4], 4) - self.assertEqual(cache[5], 5) - self.assertNotIn(2, cache) - - def test_lru_getsizeof(self): - cache = self.make_cache(maxsize=3, getsizeof=lambda x: x) - - cache[1] = 1 - cache[2] = 2 - - self.assertEqual(len(cache), 2) - self.assertEqual(cache[1], 1) - self.assertEqual(cache[2], 2) - - cache[3] = 3 - - self.assertEqual(len(cache), 1) - self.assertEqual(cache[3], 3) - self.assertNotIn(1, cache) - self.assertNotIn(2, cache) - - with self.assertRaises(ValueError): - cache[4] = 4 - self.assertEqual(len(cache), 1) - self.assertEqual(cache[3], 3) - def test_decorator(self): self.assertEqual(cached.cache_info(), (0, 0, 2, 0)) self.assertEqual(cached(1), 1) diff --git a/tests/test_ttlcache.py b/tests/test_ttlcache.py index 
5e89642..6bf2d5e 100644 --- a/tests/test_ttlcache.py +++ b/tests/test_ttlcache.py @@ -1,24 +1,81 @@ import unittest -from . import CacheTestMixin +from . import LRUCacheTestMixin from cachetools import TTLCache -class TTLCacheTest(unittest.TestCase, CacheTestMixin): +class TTLCacheTest(unittest.TestCase, LRUCacheTestMixin): - def make_cache(self, maxsize, getsizeof=None, ttl=86400): - return TTLCache(maxsize, ttl, getsizeof) + def make_cache(self, maxsize, getsizeof=None): + return TTLCache(maxsize, ttl=0, timer=lambda: 0, getsizeof=getsizeof) - def test_ttl(self): - cache = self.make_cache(maxsize=2, ttl=0) + def make_ttl_cache(self, maxsize, ttl): + class Timer: + def __init__(self): + self.__time = 0 + + def __call__(self): + return self.__time + + def inc(self): + self.__time = self.__time + 1 + + return TTLCache(maxsize, ttl, timer=Timer()) + + def test_ttl_insert(self): + cache = self.make_ttl_cache(maxsize=2, ttl=2) + self.assertEqual(cache.ttl, 2) cache[1] = 1 + + self.assertEqual(1, len(cache)) + self.assertEqual(1, cache[1]) + + cache.timer.inc() cache[2] = 2 + + self.assertEqual(2, len(cache)) + self.assertEqual(1, cache[1]) + self.assertEqual(2, cache[2]) + + cache.timer.inc() + cache[1] cache[3] = 3 + self.assertEqual(2, len(cache)) + self.assertEqual(1, cache[1]) + self.assertNotIn(2, cache) + self.assertEqual(3, cache[3]) + + def test_ttl_expire(self): + cache = self.make_ttl_cache(maxsize=3, ttl=0) + self.assertEqual(cache.ttl, 0) + + cache[1] = 1 + self.assertEqual(1, cache[1]) + cache.timer.inc() with self.assertRaises(KeyError): cache[1] + cache[2] = 2 + self.assertEqual(2, cache[2]) + cache.timer.inc() with self.assertRaises(KeyError): cache[2] - with self.assertRaises(KeyError): - cache[3] + cache[3] = 3 + self.assertEqual(3, cache[3]) + + cache.expire(1) + self.assertNotIn(1, cache) + self.assertEqual(3, cache[3]) + + cache.expire(2) + self.assertNotIn(1, cache) + self.assertNotIn(2, cache) + self.assertEqual(3, cache[3]) + + cache.timer.inc() 
+ cache.expire() + self.assertEqual(0, len(cache)) + self.assertNotIn(1, cache) + self.assertNotIn(2, cache) + self.assertNotIn(3, cache) -- cgit v1.2.3 From 5ae3b9d9dfe2fd7aa64670f4bd5534ab9ca96a79 Mon Sep 17 00:00:00 2001 From: Thomas Kemmer Date: Mon, 22 Sep 2014 20:05:53 +0200 Subject: Resolve #9: Add @ttl_cache function decorator. --- Changes | 2 ++ cachetools/__init__.py | 19 +++++++++++------- cachetools/decorators.py | 48 +++++---------------------------------------- cachetools/lfucache.py | 11 +++++++++++ cachetools/lock.py | 6 ++++++ cachetools/lrucache.py | 11 +++++++++++ cachetools/rrcache.py | 11 +++++++++++ cachetools/ttlcache.py | 11 +++++++++++ docs/index.rst | 14 +++++++++---- tests/test_cachedmethod.py | 49 ++++++++++++++++++++++++++++++++++++++++++++++ tests/test_method.py | 49 ---------------------------------------------- tests/test_ttlcache.py | 35 ++++++++++++++++++++++++++++++++- 12 files changed, 162 insertions(+), 104 deletions(-) create mode 100644 cachetools/lock.py create mode 100644 tests/test_cachedmethod.py delete mode 100644 tests/test_method.py diff --git a/Changes b/Changes index cd7d8dc..6059080 100644 --- a/Changes +++ b/Changes @@ -5,6 +5,8 @@ - Do not delete expired items in TTLCache.__getitem__(). +- Add `@ttl_cache` function decorator. 
+ 0.4.0 2014-06-16 ---------------- diff --git a/cachetools/__init__.py b/cachetools/__init__.py index c999d9d..c43a323 100644 --- a/cachetools/__init__.py +++ b/cachetools/__init__.py @@ -1,12 +1,17 @@ """Extensible memoizing collections and decorators""" -# flake8: noqa - from .cache import Cache -from .rrcache import RRCache -from .lfucache import LFUCache -from .lrucache import LRUCache -from .ttlcache import TTLCache -from .decorators import rr_cache, lfu_cache, lru_cache, cachedmethod +from .rrcache import RRCache, rr_cache +from .lfucache import LFUCache, lfu_cache +from .lrucache import LRUCache, lru_cache +from .ttlcache import TTLCache, ttl_cache +from .decorators import cachedmethod + +__all__ = ( + 'Cache', + 'RRCache', 'LFUCache', 'LRUCache', 'TTLCache', + 'rr_cache', 'lfu_cache', 'lru_cache', 'ttl_cache', + 'cachedmethod' +) __version__ = '0.5.0' diff --git a/cachetools/decorators.py b/cachetools/decorators.py index 9104013..2407376 100644 --- a/cachetools/decorators.py +++ b/cachetools/decorators.py @@ -1,15 +1,6 @@ -from .rrcache import RRCache -from .lfucache import LFUCache -from .lrucache import LRUCache - import collections import functools -try: - from threading import RLock -except ImportError: - from dummy_threading import RLock - CacheInfo = collections.namedtuple('CacheInfo', 'hits misses maxsize currsize') @@ -20,11 +11,13 @@ def _makekey(args, kwargs): def _makekey_typed(args, kwargs): key = _makekey(args, kwargs) key += tuple(type(v) for v in args) - key += tuple(type(v) for k, v in sorted(kwargs.items())) + key += tuple(type(v) for _, v in sorted(kwargs.items())) return key -def _cachedfunc(cache, makekey, lock): +def cachedfunc(cache, typed, lock): + makekey = _makekey_typed if typed else _makekey + def decorator(func): stats = [0, 0] @@ -60,36 +53,6 @@ def _cachedfunc(cache, makekey, lock): return decorator -def rr_cache(maxsize=128, typed=False, getsizeof=None, lock=RLock): - """Decorator to wrap a function with a memoizing callable 
that saves - up to `maxsize` results based on a Random Replacement (RR) - algorithm. - - """ - makekey = _makekey_typed if typed else _makekey - return _cachedfunc(RRCache(maxsize, getsizeof), makekey, lock()) - - -def lfu_cache(maxsize=128, typed=False, getsizeof=None, lock=RLock): - """Decorator to wrap a function with a memoizing callable that saves - up to `maxsize` results based on a Least Frequently Used (LFU) - algorithm. - - """ - makekey = _makekey_typed if typed else _makekey - return _cachedfunc(LFUCache(maxsize, getsizeof), makekey, lock()) - - -def lru_cache(maxsize=128, typed=False, getsizeof=None, lock=RLock): - """Decorator to wrap a function with a memoizing callable that saves - up to `maxsize` results based on a Least Recently Used (LRU) - algorithm. - - """ - makekey = _makekey_typed if typed else _makekey - return _cachedfunc(LRUCache(maxsize, getsizeof), makekey, lock()) - - def cachedmethod(cache, typed=False): """Decorator to wrap a class or instance method with a memoizing callable that saves results in a (possibly shared) cache. @@ -99,9 +62,8 @@ def cachedmethod(cache, typed=False): def decorator(method): def wrapper(self, *args, **kwargs): - # TODO: `shared`, locking... - key = makekey((method,) + args, kwargs) mapping = cache(self) + key = makekey((method,) + args, kwargs) try: return mapping[key] except KeyError: diff --git a/cachetools/lfucache.py b/cachetools/lfucache.py index 9ea66d1..a0c2554 100644 --- a/cachetools/lfucache.py +++ b/cachetools/lfucache.py @@ -1,4 +1,6 @@ from .cache import Cache +from .decorators import cachedfunc +from .lock import RLock import operator @@ -33,3 +35,12 @@ class LFUCache(Cache): except ValueError: raise KeyError('cache is empty') return (key, self.pop(key)) + + +def lfu_cache(maxsize=128, typed=False, getsizeof=None, lock=RLock): + """Decorator to wrap a function with a memoizing callable that saves + up to `maxsize` results based on a Least Frequently Used (LFU) + algorithm. 
+ + """ + return cachedfunc(LFUCache(maxsize, getsizeof), typed, lock()) diff --git a/cachetools/lock.py b/cachetools/lock.py new file mode 100644 index 0000000..33c7c89 --- /dev/null +++ b/cachetools/lock.py @@ -0,0 +1,6 @@ +# flake8: noqa + +try: + from threading import RLock +except ImportError: + from dummy_threading import RLock diff --git a/cachetools/lrucache.py b/cachetools/lrucache.py index e815545..4143f38 100644 --- a/cachetools/lrucache.py +++ b/cachetools/lrucache.py @@ -1,5 +1,7 @@ from .cache import Cache +from .decorators import cachedfunc from .link import Link +from .lock import RLock class LRUCache(Cache): @@ -70,3 +72,12 @@ class LRUCache(Cache): raise KeyError('cache is empty') key = link.data return (key, self.pop(key)) + + +def lru_cache(maxsize=128, typed=False, getsizeof=None, lock=RLock): + """Decorator to wrap a function with a memoizing callable that saves + up to `maxsize` results based on a Least Recently Used (LRU) + algorithm. + + """ + return cachedfunc(LRUCache(maxsize, getsizeof), typed, lock()) diff --git a/cachetools/rrcache.py b/cachetools/rrcache.py index 3042a4e..736fef8 100644 --- a/cachetools/rrcache.py +++ b/cachetools/rrcache.py @@ -1,4 +1,6 @@ from .cache import Cache +from .decorators import cachedfunc +from .lock import RLock import random @@ -18,3 +20,12 @@ class RRCache(Cache): except IndexError: raise KeyError('cache is empty') return (key, self.pop(key)) + + +def rr_cache(maxsize=128, typed=False, getsizeof=None, lock=RLock): + """Decorator to wrap a function with a memoizing callable that saves + up to `maxsize` results based on a Random Replacement (RR) + algorithm. 
+ + """ + return cachedfunc(RRCache(maxsize, getsizeof), typed, lock()) diff --git a/cachetools/ttlcache.py b/cachetools/ttlcache.py index bb38f2b..679e0c0 100644 --- a/cachetools/ttlcache.py +++ b/cachetools/ttlcache.py @@ -1,5 +1,7 @@ from .lrucache import LRUCache +from .decorators import cachedfunc from .link import Link +from .lock import RLock import time @@ -104,3 +106,12 @@ class TTLCache(LRUCache): cache_delitem(self, head.data[0]) head.next.prev = root head = root.next = head.next + + +def ttl_cache(maxsize=128, ttl=600, timer=time.time, typed=False, + getsizeof=None, lock=RLock): + """Decorator to wrap a function with a memoizing callable that saves + up to `maxsize` results based on a Least Recently Used (LRU) + algorithm with a per-item time-to-live (TTL) value. + """ + return cachedfunc(TTLCache(maxsize, ttl, timer, getsizeof), typed, lock()) diff --git a/docs/index.rst b/docs/index.rst index 66e27fb..a8a35ae 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -122,6 +122,12 @@ documentation for details. Unlike :func:`functools.lru_cache`, setting `maxsize` to zero or :const:`None` is not supported. +.. decorator:: rr_cache(maxsize=128, typed=False, getsizeof=None, lock=threading.RLock) + + Decorator that wraps a function with a memoizing callable that + saves up to `maxsize` results based on a Random Replacement (RR) + algorithm. + .. decorator:: lfu_cache(maxsize=128, typed=False, getsizeof=None, lock=threading.RLock) Decorator that wraps a function with a memoizing callable that @@ -134,11 +140,11 @@ Unlike :func:`functools.lru_cache`, setting `maxsize` to zero or saves up to `maxsize` results based on a Least Recently Used (LRU) algorithm. -.. decorator:: rr_cache(maxsize=128, typed=False, getsizeof=None, lock=threading.RLock) +.. 
decorator:: ttl_cache(maxsize=128, ttl=3600, timer=time.time, typed=False, getsizeof=None, lock=threading.RLock) - Decorator that wraps a function with a memoizing callable that - saves up to `maxsize` results based on a Random Replacement (RR) - algorithm. + Decorator to wrap a function with a memoizing callable that saves + up to `maxsize` results based on a Least Recently Used (LRU) + algorithm with a per-item time-to-live (TTL) value. Method Decorators diff --git a/tests/test_cachedmethod.py b/tests/test_cachedmethod.py new file mode 100644 index 0000000..ee051e2 --- /dev/null +++ b/tests/test_cachedmethod.py @@ -0,0 +1,49 @@ +import unittest +import operator + +from cachetools import LRUCache, cachedmethod + + +class Cached(object): + + cache = LRUCache(maxsize=2) + + count = 0 + + @cachedmethod(operator.attrgetter('cache')) + def get(self, value): + count = self.count + self.count += 1 + return count + + @cachedmethod(operator.attrgetter('cache'), typed=True) + def get_typed(self, value): + count = self.count + self.count += 1 + return count + + +class CachedMethodTest(unittest.TestCase): + + def test_decorator(self): + cached = Cached() + + self.assertEqual(cached.get(0), 0) + self.assertEqual(cached.get(1), 1) + self.assertEqual(cached.get(1), 1) + self.assertEqual(cached.get(1.0), 1) + self.assertEqual(cached.get(1.0), 1) + + cached.cache.clear() + self.assertEqual(cached.get(1), 2) + + def test_typed_decorator(self): + cached = Cached() + + self.assertEqual(cached.get_typed(0), 0) + self.assertEqual(cached.get_typed(1), 1) + self.assertEqual(cached.get_typed(1), 1) + self.assertEqual(cached.get_typed(1.0), 2) + self.assertEqual(cached.get_typed(1.0), 2) + self.assertEqual(cached.get_typed(0.0), 3) + self.assertEqual(cached.get_typed(0), 4) diff --git a/tests/test_method.py b/tests/test_method.py deleted file mode 100644 index 40ab5ee..0000000 --- a/tests/test_method.py +++ /dev/null @@ -1,49 +0,0 @@ -import unittest -import operator - -from cachetools 
import LRUCache, cachedmethod - - -class Cached(object): - - cache = LRUCache(maxsize=2) - - count = 0 - - @cachedmethod(operator.attrgetter('cache')) - def get(self, value): - count = self.count - self.count += 1 - return count - - @cachedmethod(operator.attrgetter('cache'), typed=True) - def get_typed(self, value): - count = self.count - self.count += 1 - return count - - -class MethodTest(unittest.TestCase): - - def test_decorator(self): - cached = Cached() - - self.assertEqual(cached.get(0), 0) - self.assertEqual(cached.get(1), 1) - self.assertEqual(cached.get(1), 1) - self.assertEqual(cached.get(1.0), 1) - self.assertEqual(cached.get(1.0), 1) - - cached.cache.clear() - self.assertEqual(cached.get(1), 2) - - def test_typed_decorator(self): - cached = Cached() - - self.assertEqual(cached.get_typed(0), 0) - self.assertEqual(cached.get_typed(1), 1) - self.assertEqual(cached.get_typed(1), 1) - self.assertEqual(cached.get_typed(1.0), 2) - self.assertEqual(cached.get_typed(1.0), 2) - self.assertEqual(cached.get_typed(0.0), 3) - self.assertEqual(cached.get_typed(0), 4) diff --git a/tests/test_ttlcache.py b/tests/test_ttlcache.py index 6bf2d5e..938d0f2 100644 --- a/tests/test_ttlcache.py +++ b/tests/test_ttlcache.py @@ -1,7 +1,17 @@ import unittest from . 
import LRUCacheTestMixin -from cachetools import TTLCache +from cachetools import TTLCache, ttl_cache + + +@ttl_cache(maxsize=2) +def cached(n): + return n + + +@ttl_cache(maxsize=2, typed=True) +def cached_typed(n): + return n class TTLCacheTest(unittest.TestCase, LRUCacheTestMixin): @@ -79,3 +89,26 @@ class TTLCacheTest(unittest.TestCase, LRUCacheTestMixin): self.assertNotIn(1, cache) self.assertNotIn(2, cache) self.assertNotIn(3, cache) + + def test_decorator(self): + self.assertEqual(cached.cache_info(), (0, 0, 2, 0)) + self.assertEqual(cached(1), 1) + self.assertEqual(cached.cache_info(), (0, 1, 2, 1)) + self.assertEqual(cached(1), 1) + self.assertEqual(cached.cache_info(), (1, 1, 2, 1)) + self.assertEqual(cached(1.0), 1.0) + self.assertEqual(cached.cache_info(), (2, 1, 2, 1)) + + cached.cache_clear() + self.assertEqual(cached(1), 1) + self.assertEqual(cached.cache_info(), (2, 2, 2, 1)) + + def test_typed_decorator(self): + self.assertEqual(cached_typed(1), 1) + self.assertEqual(cached_typed.cache_info(), (0, 1, 2, 1)) + self.assertEqual(cached_typed(1), 1) + self.assertEqual(cached_typed.cache_info(), (1, 1, 2, 1)) + self.assertEqual(cached_typed(1.0), 1.0) + self.assertEqual(cached_typed.cache_info(), (1, 2, 2, 2)) + self.assertEqual(cached_typed(1.0), 1.0) + self.assertEqual(cached_typed.cache_info(), (2, 2, 2, 2)) -- cgit v1.2.3 From e57eb9c316449f30d2dba13a19ec9c49dae3bde0 Mon Sep 17 00:00:00 2001 From: Thomas Kemmer Date: Tue, 23 Sep 2014 18:51:56 +0200 Subject: Fix #7: getsizeof() usage --- Changes | 2 ++ cachetools/cache.py | 20 +++++++++++++------- cachetools/lfucache.py | 7 ++++--- cachetools/lrucache.py | 7 ++++--- cachetools/ttlcache.py | 7 ++++--- docs/index.rst | 30 +++++++++++++----------------- tests/__init__.py | 13 +++++++++++++ 7 files changed, 53 insertions(+), 33 deletions(-) diff --git a/Changes b/Changes index 6059080..4cdd398 100644 --- a/Changes +++ b/Changes @@ -7,6 +7,8 @@ - Add `@ttl_cache` function decorator. 
+- Fix public `getsizeof()` usage. + 0.4.0 2014-06-16 ---------------- diff --git a/cachetools/cache.py b/cachetools/cache.py index d5176eb..abbafc5 100644 --- a/cachetools/cache.py +++ b/cachetools/cache.py @@ -8,16 +8,19 @@ class Cache(collections.MutableMapping): space when necessary. Derived classes may override :meth:`popitem` to implement specific caching strategies. If a subclass has to keep track of item access, insertion or deletion, - it may need override :meth:`__getitem__`, :meth:`__setitem__` and - :meth:`__delitem__`, too. + it may additionally need to override :meth:`__getitem__`, + :meth:`__setitem__` and :meth:`__delitem__`. If a subclass has to + keep meta data with its values, i.e. the `value` argument passed + to :meth:`Cache.__setitem__` is different from what a user would + regard as the cache's value, it will probably want to override + :meth:`getsizeof`, too. """ def __init__(self, maxsize, getsizeof=None): - if getsizeof is not None: - self.getsizeof = getsizeof self.__mapping = dict() self.__maxsize = maxsize + self.__getsizeof = getsizeof or self.__one self.__currsize = 0 def __repr__(self): @@ -34,7 +37,7 @@ class Cache(collections.MutableMapping): def __setitem__(self, key, value): mapping = self.__mapping maxsize = self.__maxsize - size = self.getsizeof(value) + size = self.__getsizeof(value) if size > maxsize: raise ValueError('value too large') if key not in mapping or mapping[key][1] < size: @@ -65,7 +68,10 @@ class Cache(collections.MutableMapping): """Return the current size of the cache.""" return self.__currsize - @staticmethod - def getsizeof(value): + def getsizeof(self, value): """Return the size of a cache element.""" + return self.__getsizeof(value) + + @staticmethod + def __one(value): return 1 diff --git a/cachetools/lfucache.py b/cachetools/lfucache.py index a0c2554..6949b6a 100644 --- a/cachetools/lfucache.py +++ b/cachetools/lfucache.py @@ -14,10 +14,11 @@ class LFUCache(Cache): """ def __init__(self, maxsize, 
getsizeof=None): - if getsizeof is not None: - Cache.__init__(self, maxsize, lambda e: getsizeof(e[0])) - else: + if getsizeof is None: Cache.__init__(self, maxsize) + else: + Cache.__init__(self, maxsize, lambda e: getsizeof(e[0])) + self.getsizeof = getsizeof def __getitem__(self, key, cache_getitem=Cache.__getitem__): entry = cache_getitem(self, key) diff --git a/cachetools/lrucache.py b/cachetools/lrucache.py index 4143f38..35bcddc 100644 --- a/cachetools/lrucache.py +++ b/cachetools/lrucache.py @@ -13,10 +13,11 @@ class LRUCache(Cache): """ def __init__(self, maxsize, getsizeof=None): - if getsizeof is not None: - Cache.__init__(self, maxsize, lambda e: getsizeof(e[0])) - else: + if getsizeof is None: Cache.__init__(self, maxsize) + else: + Cache.__init__(self, maxsize, lambda e: getsizeof(e[0])) + self.getsizeof = getsizeof root = Link() root.prev = root.next = root self.__root = root diff --git a/cachetools/ttlcache.py b/cachetools/ttlcache.py index 679e0c0..1b205ba 100644 --- a/cachetools/ttlcache.py +++ b/cachetools/ttlcache.py @@ -20,10 +20,11 @@ class TTLCache(LRUCache): """ def __init__(self, maxsize, ttl, timer=time.time, getsizeof=None): - if getsizeof is not None: - LRUCache.__init__(self, maxsize, lambda e: getsizeof(e[0])) - else: + if getsizeof is None: LRUCache.__init__(self, maxsize) + else: + LRUCache.__init__(self, maxsize, lambda e: getsizeof(e[0])) + self.getsizeof = getsizeof root = Link() root.prev = root.next = root self.__root = root diff --git a/docs/index.rst b/docs/index.rst index a8a35ae..973f704 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -23,15 +23,12 @@ including a variant of the Python 3 Standard Library LRUCache([('second', 2), ('fourth', 4)], maxsize=2, currsize=2) For the purpose of this module, a *cache* is a mutable_ mapping_ of a -fixed maximum *size*. When the cache is full, i.e. 
the current size -of the cache exceeds its maximum size, the cache must choose which -item(s) to discard based on a suitable `cache algorithm`_. - -In general, a cache's size is the sum of the size of its items. If -the size of each item is 1, a cache's size is equal to the number of -its items, i.e. `len(cache)`. An items's size may also be a property -or function of its value, e.g. the result of :func:`sys.getsizeof`, or -:func:`len` for string and sequence values. +fixed maximum size. When the cache is full, i.e. the size of the +cache would exceed its maximum size, the cache must choose which +item(s) to discard based on a suitable `cache algorithm`_. A cache's +size is the sum of the size of its items, and an item's size in +general is a property or function of its value, e.g. the result of +:func:`sys.getsizeof`, or :func:`len` for string and sequence values. This module provides various cache implementations based on different cache algorithms, as well as decorators for easily memoizing function @@ -48,14 +45,13 @@ different cache algorithms. All these classes derive from class :attr:`maxsize` and :attr:`currsize` to retrieve the maximum and current size of the cache. -:class:`Cache` also features a static method :meth:`getsizeof`, which -returns the size of a given item and may be overridden by subclasses. -The default implementation of :meth:`getsizeof` returns :const:`1` -irrespective of its `value` argument. For convenience, all cache -classes also accept an optional named constructor parameter -`getsizeof`, that may specify a function of one argument used to -retrieve the size of an item's value. - +:class:`Cache` also features a :meth:`getsizeof` method, which returns +the size of a given item. The default implementation of +:meth:`getsizeof` returns :const:`1` irrespective of its `value` +argument, making the cache's size equal to the number of its items, or +`len(cache)`. 
For convenience, all cache classes accept an optional +named constructor parameter `getsizeof`, which may specify a function +of one argument used to retrieve the size of an item's value. .. autoclass:: Cache :members: diff --git a/tests/__init__.py b/tests/__init__.py index 7048bfd..a1ff120 100644 --- a/tests/__init__.py +++ b/tests/__init__.py @@ -3,6 +3,16 @@ class CacheTestMixin(object): def make_cache(self, maxsize, getsizeof=None): raise NotImplementedError + def test_defaults(self): + cache = self.make_cache(maxsize=1) + self.assertEqual(0, len(cache)) + self.assertEqual(1, cache.maxsize) + self.assertEqual(0, cache.currsize) + self.assertEqual(1, cache.getsizeof(None)) + self.assertEqual(1, cache.getsizeof('')) + self.assertEqual(1, cache.getsizeof(0)) + + def test_insert(self): cache = self.make_cache(maxsize=2) @@ -93,6 +103,9 @@ class CacheTestMixin(object): cache = self.make_cache(maxsize=3, getsizeof=lambda x: x) self.assertEqual(3, cache.maxsize) self.assertEqual(0, cache.currsize) + self.assertEqual(1, cache.getsizeof(1)) + self.assertEqual(2, cache.getsizeof(2)) + self.assertEqual(3, cache.getsizeof(3)) cache.update({1: 1, 2: 2}) self.assertEqual(2, len(cache)) -- cgit v1.2.3 From 36c792475bc13e4fa6d606146c4144a71d79d2f4 Mon Sep 17 00:00:00 2001 From: Thomas Kemmer Date: Tue, 23 Sep 2014 18:56:02 +0200 Subject: Release 0.5.0 --- Changes | 4 +--- cachetools/ttlcache.py | 1 - 2 files changed, 1 insertion(+), 4 deletions(-) diff --git a/Changes b/Changes index 4cdd398..acc1c6f 100644 --- a/Changes +++ b/Changes @@ -1,8 +1,6 @@ -0.5.0 UNRELEASED +0.5.0 2014-09-23 ---------------- -- Update Changelog, README, documentation. - - Do not delete expired items in TTLCache.__getitem__(). - Add `@ttl_cache` function decorator. 
diff --git a/cachetools/ttlcache.py b/cachetools/ttlcache.py index 1b205ba..b0dbf38 100644 --- a/cachetools/ttlcache.py +++ b/cachetools/ttlcache.py @@ -98,7 +98,6 @@ class TTLCache(LRUCache): return value def expire(self, time=None, cache_delitem=LRUCache.__delitem__): - """TODO""" if time is None: time = self.__timer() root = self.__root -- cgit v1.2.3 From ba0f849543155f76caf2107e9a8108e56d0604e1 Mon Sep 17 00:00:00 2001 From: Thomas Kemmer Date: Tue, 23 Sep 2014 19:03:44 +0200 Subject: Fix default ttl for @ttl_cache --- docs/index.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/index.rst b/docs/index.rst index 973f704..1c7ee6b 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -136,7 +136,7 @@ Unlike :func:`functools.lru_cache`, setting `maxsize` to zero or saves up to `maxsize` results based on a Least Recently Used (LRU) algorithm. -.. decorator:: ttl_cache(maxsize=128, ttl=3600, timer=time.time, typed=False, getsizeof=None, lock=threading.RLock) +.. decorator:: ttl_cache(maxsize=128, ttl=600, timer=time.time, typed=False, getsizeof=None, lock=threading.RLock) Decorator to wrap a function with a memoizing callable that saves up to `maxsize` results based on a Least Recently Used (LRU) -- cgit v1.2.3 From d5ef9cc6f386e3d4e94590b2651eb77aa1bc7658 Mon Sep 17 00:00:00 2001 From: Thomas Kemmer Date: Thu, 25 Sep 2014 18:03:17 +0200 Subject: Fix #12: No formatting of KeyError arguments. 
--- cachetools/ttlcache.py | 2 +- tests/test_ttlcache.py | 11 +++++++++++ 2 files changed, 12 insertions(+), 1 deletion(-) diff --git a/cachetools/ttlcache.py b/cachetools/ttlcache.py index b0dbf38..c910d12 100644 --- a/cachetools/ttlcache.py +++ b/cachetools/ttlcache.py @@ -42,7 +42,7 @@ class TTLCache(LRUCache): def __getitem__(self, key, cache_getitem=LRUCache.__getitem__): value, link = cache_getitem(self, key) if link.data[1] < self.__timer(): - raise KeyError('%r has expired' % key) + raise KeyError(key) return value def __setitem__(self, key, value, diff --git a/tests/test_ttlcache.py b/tests/test_ttlcache.py index 938d0f2..c1145e5 100644 --- a/tests/test_ttlcache.py +++ b/tests/test_ttlcache.py @@ -90,6 +90,17 @@ class TTLCacheTest(unittest.TestCase, LRUCacheTestMixin): self.assertNotIn(2, cache) self.assertNotIn(3, cache) + def test_ttl_tuple_key(self): + cache = self.make_ttl_cache(maxsize=1, ttl=0) + + cache[(1, 2, 3)] = 42 + self.assertEqual(42, cache[(1, 2, 3)]) + cache.timer.inc() + with self.assertRaises(KeyError): + cache[(1, 2, 3)] + cache.expire() + self.assertNotIn((1, 2, 3), cache) + def test_decorator(self): self.assertEqual(cached.cache_info(), (0, 0, 2, 0)) self.assertEqual(cached(1), 1) -- cgit v1.2.3 From a837142c8d9232d26f8a9b5bc143f1dc4017983b Mon Sep 17 00:00:00 2001 From: Thomas Kemmer Date: Thu, 25 Sep 2014 18:06:23 +0200 Subject: Fix #11: Update README.rst file. --- README.rst | 15 ++++++--------- 1 file changed, 6 insertions(+), 9 deletions(-) diff --git a/README.rst b/README.rst index 02a366e..6cd79d0 100644 --- a/README.rst +++ b/README.rst @@ -22,15 +22,12 @@ including a variant of the Python 3 Standard Library For the purpose of this module, a *cache* is a mutable_ mapping_ of a -fixed maximum *size*. When the cache is full, i.e. the current size -of the cache exceeds its maximum size, the cache must choose which -item(s) to discard based on a suitable `cache algorithm`_. 
- -In general, a cache's size is the sum of the size of its items. If -the size of each items is 1, a cache's size is equal to the number of -its items, i.e. ``len(cache)``. An items's size may also be a -property or function of its value, e.g. the result of -``sys.getsizeof()``, or ``len()`` for string and sequence values. +fixed maximum size. When the cache is full, i.e. the size of the +cache would exceed its maximum size, the cache must choose which +item(s) to discard based on a suitable `cache algorithm`_. A cache's +size is the sum of the size of its items, and an item's size in +general is a property or function of its value, e.g. the result of +``sys.getsizeof``, or ``len`` for string and sequence values. This module provides various cache implementations based on different cache algorithms, as well as decorators for easily memoizing function -- cgit v1.2.3 From 44b84324f5e52291753addaf9ab8fc89b2d507d2 Mon Sep 17 00:00:00 2001 From: Thomas Kemmer Date: Thu, 25 Sep 2014 18:07:53 +0200 Subject: Prepare v0.5.1 --- Changes | 8 ++++++++ cachetools/__init__.py | 2 +- 2 files changed, 9 insertions(+), 1 deletion(-) diff --git a/Changes b/Changes index acc1c6f..155a2ec 100644 --- a/Changes +++ b/Changes @@ -1,3 +1,11 @@ +0.5.1 2014-09-25 +---------------- + +- No formatting of `KeyError` arguments. + +- Update `README.rst`. 
+ + 0.5.0 2014-09-23 ---------------- diff --git a/cachetools/__init__.py b/cachetools/__init__.py index c43a323..d73af09 100644 --- a/cachetools/__init__.py +++ b/cachetools/__init__.py @@ -14,4 +14,4 @@ __all__ = ( 'cachedmethod' ) -__version__ = '0.5.0' +__version__ = '0.5.1' -- cgit v1.2.3 From 2f9561081165498a9cc70a9810ce4a2eb7b07c21 Mon Sep 17 00:00:00 2001 From: Thomas Kemmer Date: Sun, 12 Oct 2014 15:03:51 +0200 Subject: Add Travis CI --- .travis.yml | 11 +++++++++++ README.rst | 8 ++++++-- setup.py | 1 + 3 files changed, 18 insertions(+), 2 deletions(-) create mode 100644 .travis.yml diff --git a/.travis.yml b/.travis.yml new file mode 100644 index 0000000..6219b6c --- /dev/null +++ b/.travis.yml @@ -0,0 +1,11 @@ +language: python +python: + - "2.7" + - "3.2" + - "3.3" + - "3.4" +# command to install dependencies +install: + - "pip install ." +# command to run tests +script: nosetests diff --git a/README.rst b/README.rst index 6cd79d0..1938241 100644 --- a/README.rst +++ b/README.rst @@ -45,14 +45,18 @@ Install cachetools using pip:: Project Resources ------------------------------------------------------------------------ -.. image:: http://img.shields.io/pypi/v/cachetools.svg +.. image:: http://img.shields.io/pypi/v/cachetools.svg?style=flat :target: https://pypi.python.org/pypi/cachetools/ :alt: Latest PyPI version -.. image:: http://img.shields.io/pypi/dm/cachetools.svg +.. image:: http://img.shields.io/pypi/dm/cachetools.svg?style=flat :target: https://pypi.python.org/pypi/cachetools/ :alt: Number of PyPI downloads +.. 
image:: http://img.shields.io/travis/tkem/cachetools.svg?style=flat + :target: https://travis-ci.org/tkem/cachetools/ + :alt: Travis CI build status + - `Documentation`_ - `Issue Tracker`_ - `Source Code`_ diff --git a/setup.py b/setup.py index ef6ba8e..07fef3f 100644 --- a/setup.py +++ b/setup.py @@ -28,6 +28,7 @@ setup( 'Programming Language :: Python :: 2.7', 'Programming Language :: Python :: 3', 'Programming Language :: Python :: 3.2', + 'Programming Language :: Python :: 3.3', 'Programming Language :: Python :: 3.4', 'Topic :: Software Development :: Libraries :: Python Modules' ], -- cgit v1.2.3 From 4649656eb0cbf38a92fc8f4d6284c1c741600c9e Mon Sep 17 00:00:00 2001 From: Thomas Kemmer Date: Thu, 23 Oct 2014 18:44:58 +0200 Subject: Add coveralls --- .travis.yml | 18 ++++++++++-------- README.rst | 16 ++++++++++------ 2 files changed, 20 insertions(+), 14 deletions(-) diff --git a/.travis.yml b/.travis.yml index 6219b6c..3c1a2e3 100644 --- a/.travis.yml +++ b/.travis.yml @@ -1,11 +1,13 @@ language: python python: - - "2.7" - - "3.2" - - "3.3" - - "3.4" -# command to install dependencies + - 2.7 + - 3.2 + - 3.3 + - 3.4 install: - - "pip install ." -# command to run tests -script: nosetests + - pip install . + - pip install coverage coveralls +script: + - nosetests --with-coverage +after_success: + - coveralls diff --git a/README.rst b/README.rst index 1938241..59b502b 100644 --- a/README.rst +++ b/README.rst @@ -46,16 +46,20 @@ Project Resources ------------------------------------------------------------------------ .. image:: http://img.shields.io/pypi/v/cachetools.svg?style=flat - :target: https://pypi.python.org/pypi/cachetools/ - :alt: Latest PyPI version + :target: https://pypi.python.org/pypi/cachetools/ + :alt: Latest PyPI version .. 
image:: http://img.shields.io/pypi/dm/cachetools.svg?style=flat - :target: https://pypi.python.org/pypi/cachetools/ - :alt: Number of PyPI downloads + :target: https://pypi.python.org/pypi/cachetools/ + :alt: Number of PyPI downloads .. image:: http://img.shields.io/travis/tkem/cachetools.svg?style=flat - :target: https://travis-ci.org/tkem/cachetools/ - :alt: Travis CI build status + :target: https://travis-ci.org/tkem/cachetools/ + :alt: Travis CI build status + +.. image:: http://img.shields.io/coveralls/tkem/cachetools.svg?style=flat + :target: https://coveralls.io/r/tkem/cachetools + :alt: Test coverage - `Documentation`_ - `Issue Tracker`_ -- cgit v1.2.3 From cb95c354cee3ac998a0ccd0e264a5851d8b1d161 Mon Sep 17 00:00:00 2001 From: Thomas Kemmer Date: Thu, 23 Oct 2014 18:52:41 +0200 Subject: Add .coveragerc --- .coveragerc | 5 +++++ 1 file changed, 5 insertions(+) create mode 100644 .coveragerc diff --git a/.coveragerc b/.coveragerc new file mode 100644 index 0000000..e77617c --- /dev/null +++ b/.coveragerc @@ -0,0 +1,5 @@ +[report] +omit = + */pyshared/* + */python?.?/* + */site-packages/nose/* -- cgit v1.2.3 From 430e338e3a36b59d7d2a7a771a801530cc2217cb Mon Sep 17 00:00:00 2001 From: Thomas Kemmer Date: Thu, 23 Oct 2014 08:16:46 +0200 Subject: Fix #15: Allow @cachedmethod.cache() to return None --- cachetools/decorators.py | 3 +++ docs/index.rst | 28 +++++++++++++++------------- tests/test_cachedmethod.py | 21 +++++++++++++++++---- 3 files changed, 35 insertions(+), 17 deletions(-) diff --git a/cachetools/decorators.py b/cachetools/decorators.py index 2407376..5e4cdf9 100644 --- a/cachetools/decorators.py +++ b/cachetools/decorators.py @@ -63,6 +63,8 @@ def cachedmethod(cache, typed=False): def decorator(method): def wrapper(self, *args, **kwargs): mapping = cache(self) + if mapping is None: + return method(self, *args, **kwargs) key = makekey((method,) + args, kwargs) try: return mapping[key] @@ -72,6 +74,7 @@ def cachedmethod(cache, typed=False): 
mapping[key] = result return result + wrapper.cache = cache return functools.update_wrapper(wrapper, method) return decorator diff --git a/docs/index.rst b/docs/index.rst index 1c7ee6b..6de7120 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -90,7 +90,7 @@ Function Decorators ------------------------------------------------------------------------ This module provides several memoizing function decorators compatible -with --- though not necessarily as efficient as --- the Python 3 +with -- though not necessarily as efficient as -- the Python 3 Standard Library :func:`functools.lru_cache` decorator. In addition to a `maxsize` parameter, all decorators feature optional @@ -149,13 +149,19 @@ Method Decorators .. decorator:: cachedmethod(cache, typed=False) `cache` specifies a function of one argument that, when passed - :const:`self`, will return a *cache* for the respective instance or - class. Multiple methods of an object or class may share the same - cache. + :const:`self`, will return a cache object for the respective + instance or class. If `cache(self)` returns :const:`None`, the + original underlying method is called directly and the result is not + cached. The `cache` function is also available as the wrapped + function's :attr:`cache` attribute. - One advantage of the `@cachedmethod` decorator over the similar - function decorators is that cache properties such as `maxsize` can - be set at runtime:: + Multiple methods of an object or class may share the same cache + object, but it is the user's responsibility to handle concurrent + cache access in a multi-threaded environment. 
+ + One advantage of this decorator over the similar function + decorators is that cache properties such as `maxsize` can be set at + runtime:: import operator import urllib.request @@ -168,18 +174,14 @@ Method Decorators self.cache = LRUCache(maxsize=cachesize) @cachedmethod(operator.attrgetter('cache')) - def get_pep(self, num): + def get(self, num): """Retrieve text of a Python Enhancement Proposal""" url = 'http://www.python.org/dev/peps/pep-%04d/' % num with urllib.request.urlopen(url) as s: return s.read() peps = CachedPEPs(cachesize=10) - print("PEP #1: %s" % peps.get_pep(1)) - - Note that no locking will be performed on the object returned by - `cache(self)`, so dealing with concurrent access is entirely the - responsibility of the user. + print("PEP #1: %s" % peps.get(1)) .. _mutable: http://docs.python.org/dev/glossary.html#term-mutable diff --git a/tests/test_cachedmethod.py b/tests/test_cachedmethod.py index ee051e2..44a2f2f 100644 --- a/tests/test_cachedmethod.py +++ b/tests/test_cachedmethod.py @@ -6,10 +6,11 @@ from cachetools import LRUCache, cachedmethod class Cached(object): - cache = LRUCache(maxsize=2) - count = 0 + def __init__(self, cache): + self.cache = cache + @cachedmethod(operator.attrgetter('cache')) def get(self, value): count = self.count @@ -26,7 +27,8 @@ class Cached(object): class CachedMethodTest(unittest.TestCase): def test_decorator(self): - cached = Cached() + cached = Cached(LRUCache(maxsize=2)) + self.assertEqual(cached.cache, cached.get.cache(cached)) self.assertEqual(cached.get(0), 0) self.assertEqual(cached.get(1), 1) @@ -38,7 +40,8 @@ class CachedMethodTest(unittest.TestCase): self.assertEqual(cached.get(1), 2) def test_typed_decorator(self): - cached = Cached() + cached = Cached(LRUCache(maxsize=2)) + self.assertEqual(cached.cache, cached.get_typed.cache(cached)) self.assertEqual(cached.get_typed(0), 0) self.assertEqual(cached.get_typed(1), 1) @@ -47,3 +50,13 @@ class CachedMethodTest(unittest.TestCase): 
self.assertEqual(cached.get_typed(1.0), 2) self.assertEqual(cached.get_typed(0.0), 3) self.assertEqual(cached.get_typed(0), 4) + + def test_decorator_nocache(self): + cached = Cached(None) + self.assertEqual(None, cached.get.cache(cached)) + + self.assertEqual(cached.get(0), 0) + self.assertEqual(cached.get(1), 1) + self.assertEqual(cached.get(1), 2) + self.assertEqual(cached.get(1.0), 3) + self.assertEqual(cached.get(1.0), 4) -- cgit v1.2.3 From aae7bf592c72edbcf6a976f4a050f21fe2689181 Mon Sep 17 00:00:00 2001 From: Thomas Kemmer Date: Thu, 23 Oct 2014 11:59:45 +0200 Subject: Fix #16: Support unsynchronized function decorators. --- cachetools/decorators.py | 34 ++++++++++++++++++++++++---------- cachetools/lfucache.py | 2 +- cachetools/lrucache.py | 2 +- cachetools/rrcache.py | 2 +- cachetools/ttlcache.py | 2 +- tests/__init__.py | 1 - tests/test_lfucache.py | 2 +- tests/test_lrucache.py | 2 +- tests/test_rrcache.py | 2 +- tests/test_ttlcache.py | 2 +- 10 files changed, 32 insertions(+), 19 deletions(-) diff --git a/cachetools/decorators.py b/cachetools/decorators.py index 5e4cdf9..7ad617d 100644 --- a/cachetools/decorators.py +++ b/cachetools/decorators.py @@ -1,29 +1,43 @@ import collections +import contextlib # noqa import functools +try: + from contextlib import ExitStack as NullContext # Python 3.3 +except ImportError: + class NullContext: + def __enter__(self): + pass + + def __exit__(self, exc_type, exc_val, exc_tb): + pass + CacheInfo = collections.namedtuple('CacheInfo', 'hits misses maxsize currsize') +nullcontext = NullContext() + -def _makekey(args, kwargs): +def makekey_untyped(args, kwargs): return (args, tuple(sorted(kwargs.items()))) -def _makekey_typed(args, kwargs): - key = _makekey(args, kwargs) +def makekey_typed(args, kwargs): + key = makekey_untyped(args, kwargs) key += tuple(type(v) for v in args) key += tuple(type(v) for _, v in sorted(kwargs.items())) return key -def cachedfunc(cache, typed, lock): - makekey = _makekey_typed if typed else 
_makekey +def cachedfunc(cache, typed=False, lock=None): + makekey = makekey_typed if typed else makekey_untyped + context = lock() if lock else nullcontext def decorator(func): stats = [0, 0] def wrapper(*args, **kwargs): key = makekey(args, kwargs) - with lock: + with context: try: result = cache[key] stats[0] += 1 @@ -31,19 +45,19 @@ def cachedfunc(cache, typed, lock): except KeyError: stats[1] += 1 result = func(*args, **kwargs) - with lock: + with context: cache[key] = result return result def cache_info(): - with lock: + with context: hits, misses = stats maxsize = cache.maxsize currsize = cache.currsize return CacheInfo(hits, misses, maxsize, currsize) def cache_clear(): - with lock: + with context: cache.clear() wrapper.cache_info = cache_info @@ -58,7 +72,7 @@ def cachedmethod(cache, typed=False): callable that saves results in a (possibly shared) cache. """ - makekey = _makekey_typed if typed else _makekey + makekey = makekey_typed if typed else makekey_untyped def decorator(method): def wrapper(self, *args, **kwargs): diff --git a/cachetools/lfucache.py b/cachetools/lfucache.py index 6949b6a..e0ea562 100644 --- a/cachetools/lfucache.py +++ b/cachetools/lfucache.py @@ -44,4 +44,4 @@ def lfu_cache(maxsize=128, typed=False, getsizeof=None, lock=RLock): algorithm. """ - return cachedfunc(LFUCache(maxsize, getsizeof), typed, lock()) + return cachedfunc(LFUCache(maxsize, getsizeof), typed, lock) diff --git a/cachetools/lrucache.py b/cachetools/lrucache.py index 35bcddc..defeed1 100644 --- a/cachetools/lrucache.py +++ b/cachetools/lrucache.py @@ -81,4 +81,4 @@ def lru_cache(maxsize=128, typed=False, getsizeof=None, lock=RLock): algorithm. 
""" - return cachedfunc(LRUCache(maxsize, getsizeof), typed, lock()) + return cachedfunc(LRUCache(maxsize, getsizeof), typed, lock) diff --git a/cachetools/rrcache.py b/cachetools/rrcache.py index 736fef8..d2494c7 100644 --- a/cachetools/rrcache.py +++ b/cachetools/rrcache.py @@ -28,4 +28,4 @@ def rr_cache(maxsize=128, typed=False, getsizeof=None, lock=RLock): algorithm. """ - return cachedfunc(RRCache(maxsize, getsizeof), typed, lock()) + return cachedfunc(RRCache(maxsize, getsizeof), typed, lock) diff --git a/cachetools/ttlcache.py b/cachetools/ttlcache.py index c910d12..453f9a3 100644 --- a/cachetools/ttlcache.py +++ b/cachetools/ttlcache.py @@ -114,4 +114,4 @@ def ttl_cache(maxsize=128, ttl=600, timer=time.time, typed=False, up to `maxsize` results based on a Least Recently Used (LRU) algorithm with a per-item time-to-live (TTL) value. """ - return cachedfunc(TTLCache(maxsize, ttl, timer, getsizeof), typed, lock()) + return cachedfunc(TTLCache(maxsize, ttl, timer, getsizeof), typed, lock) diff --git a/tests/__init__.py b/tests/__init__.py index a1ff120..bd6bc2d 100644 --- a/tests/__init__.py +++ b/tests/__init__.py @@ -12,7 +12,6 @@ class CacheTestMixin(object): self.assertEqual(1, cache.getsizeof('')) self.assertEqual(1, cache.getsizeof(0)) - def test_insert(self): cache = self.make_cache(maxsize=2) diff --git a/tests/test_lfucache.py b/tests/test_lfucache.py index 245793e..3f66681 100644 --- a/tests/test_lfucache.py +++ b/tests/test_lfucache.py @@ -9,7 +9,7 @@ def cached(n): return n -@lfu_cache(maxsize=2, typed=True) +@lfu_cache(maxsize=2, typed=True, lock=None) def cached_typed(n): return n diff --git a/tests/test_lrucache.py b/tests/test_lrucache.py index bdbb39d..9bedcb5 100644 --- a/tests/test_lrucache.py +++ b/tests/test_lrucache.py @@ -9,7 +9,7 @@ def cached(n): return n -@lru_cache(maxsize=2, typed=True) +@lru_cache(maxsize=2, typed=True, lock=None) def cached_typed(n): return n diff --git a/tests/test_rrcache.py b/tests/test_rrcache.py index 
207b7fd..fb5df22 100644 --- a/tests/test_rrcache.py +++ b/tests/test_rrcache.py @@ -9,7 +9,7 @@ def cached(n): return n -@rr_cache(maxsize=2, typed=True) +@rr_cache(maxsize=2, typed=True, lock=None) def cached_typed(n): return n diff --git a/tests/test_ttlcache.py b/tests/test_ttlcache.py index c1145e5..ad6ce37 100644 --- a/tests/test_ttlcache.py +++ b/tests/test_ttlcache.py @@ -9,7 +9,7 @@ def cached(n): return n -@ttl_cache(maxsize=2, typed=True) +@ttl_cache(maxsize=2, typed=True, lock=None) def cached_typed(n): return n -- cgit v1.2.3 From 34881215cfeff79eeea4852012c94830cdd89bd2 Mon Sep 17 00:00:00 2001 From: Thomas Kemmer Date: Thu, 23 Oct 2014 12:18:32 +0200 Subject: Fix #17: Raise TTLCache.ExpiredError for expired TTLCache items. --- cachetools/ttlcache.py | 58 +++++++++++++++++++++++++++++++------------------- docs/index.rst | 13 +++++++---- tests/test_ttlcache.py | 6 +++--- 3 files changed, 48 insertions(+), 29 deletions(-) diff --git a/cachetools/ttlcache.py b/cachetools/ttlcache.py index 453f9a3..56ff553 100644 --- a/cachetools/ttlcache.py +++ b/cachetools/ttlcache.py @@ -13,12 +13,25 @@ class TTLCache(LRUCache): This class associates a time-to-live value with each item. Items that expire because they have exceeded their time-to-live will be - removed automatically. If no expired items are there to remove, - the least recently used items will be discarded first to make - space when necessary. + removed. If no expired items are there to remove, the least + recently used items will be discarded first to make space when + necessary. + + By default, the time-to-live is specified in seconds, and the + standard :func:`time.time` function is used to retrieve the + current time. A custom `timer` function can be supplied if + needed. """ + class ExpiredError(KeyError): + """Raised when a cached item's time-to-live has expired. + + This is a subclass of :exc:`KeyError`. 
+ + """ + pass + def __init__(self, maxsize, ttl, timer=time.time, getsizeof=None): if getsizeof is None: LRUCache.__init__(self, maxsize) @@ -42,7 +55,7 @@ class TTLCache(LRUCache): def __getitem__(self, key, cache_getitem=LRUCache.__getitem__): value, link = cache_getitem(self, key) if link.data[1] < self.__timer(): - raise KeyError(key) + raise TTLCache.ExpiredError(key) return value def __setitem__(self, key, value, @@ -75,15 +88,16 @@ class TTLCache(LRUCache): link.unlink() self.expire() - @property - def ttl(self): - """Return the time-to-live of the cache.""" - return self.__ttl - - @property - def timer(self): - """Return the timer used by the cache.""" - return self.__timer + def expire(self, time=None, cache_delitem=LRUCache.__delitem__): + """Remove expired items from the cache.""" + if time is None: + time = self.__timer() + root = self.__root + head = root.next + while head is not root and head.data[1] < time: + cache_delitem(self, head.data[0]) + head.next.prev = root + head = root.next = head.next def pop(self, key, default=_marker): try: @@ -97,15 +111,15 @@ class TTLCache(LRUCache): self.expire() return value - def expire(self, time=None, cache_delitem=LRUCache.__delitem__): - if time is None: - time = self.__timer() - root = self.__root - head = root.next - while head is not root and head.data[1] < time: - cache_delitem(self, head.data[0]) - head.next.prev = root - head = root.next = head.next + @property + def timer(self): + """Return the timer used by the cache.""" + return self.__timer + + @property + def ttl(self): + """Return the time-to-live of the cache.""" + return self.__ttl def ttl_cache(maxsize=128, ttl=600, timer=time.time, typed=False, diff --git a/docs/index.rst b/docs/index.rst index 6de7120..da86c26 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -66,11 +66,11 @@ of one argument used to retrieve the size of an item's value. :members: .. 
autoclass:: TTLCache - :members: + :members: ExpiredError, timer, ttl Note that a cache item may expire at *any* time, so iterating over - the items of a :class:`TTLCache` may raise :class:`KeyError` - unexpectedly:: + the items of a :class:`TTLCache` may raise + :class:`TTLCache.ExpiredError` unexpectedly:: from cachetools import TTLCache import time @@ -82,9 +82,14 @@ of one argument used to retrieve the size of an item's value. for key in cache: try: print(cache[key]) - except KeyError: + except TTLCache.ExpiredError: print('Key %r has expired' % key) + .. automethod:: expire(self, time=None) + + If `time` is not :const:`None`, remove all items whose + time-to-live would have expired by `time`. + Function Decorators ------------------------------------------------------------------------ diff --git a/tests/test_ttlcache.py b/tests/test_ttlcache.py index ad6ce37..c89afe8 100644 --- a/tests/test_ttlcache.py +++ b/tests/test_ttlcache.py @@ -64,12 +64,12 @@ class TTLCacheTest(unittest.TestCase, LRUCacheTestMixin): cache[1] = 1 self.assertEqual(1, cache[1]) cache.timer.inc() - with self.assertRaises(KeyError): + with self.assertRaises(TTLCache.ExpiredError): cache[1] cache[2] = 2 self.assertEqual(2, cache[2]) cache.timer.inc() - with self.assertRaises(KeyError): + with self.assertRaises(TTLCache.ExpiredError): cache[2] cache[3] = 3 self.assertEqual(3, cache[3]) @@ -96,7 +96,7 @@ class TTLCacheTest(unittest.TestCase, LRUCacheTestMixin): cache[(1, 2, 3)] = 42 self.assertEqual(42, cache[(1, 2, 3)]) cache.timer.inc() - with self.assertRaises(KeyError): + with self.assertRaises(TTLCache.ExpiredError): cache[(1, 2, 3)] cache.expire() self.assertNotIn((1, 2, 3), cache) -- cgit v1.2.3 From 482d35f78b643877cbd6cb23fe6b4c2d0f692735 Mon Sep 17 00:00:00 2001 From: Thomas Kemmer Date: Thu, 23 Oct 2014 19:43:00 +0200 Subject: Prepare v0.6.0 --- cachetools/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cachetools/__init__.py b/cachetools/__init__.py 
index d73af09..f789ab9 100644 --- a/cachetools/__init__.py +++ b/cachetools/__init__.py @@ -14,4 +14,4 @@ __all__ = ( 'cachedmethod' ) -__version__ = '0.5.1' +__version__ = '0.6.0' -- cgit v1.2.3 From aba5de60c6c7cc5d208c51732d6cd70066ff30f9 Mon Sep 17 00:00:00 2001 From: Thomas Kemmer Date: Thu, 23 Oct 2014 19:54:12 +0200 Subject: Update .travis.yml --- .travis.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.travis.yml b/.travis.yml index 3c1a2e3..fd9887b 100644 --- a/.travis.yml +++ b/.travis.yml @@ -8,6 +8,6 @@ install: - pip install . - pip install coverage coveralls script: - - nosetests --with-coverage + - nosetests --with-coverage --cover-package=cachetools after_success: - coveralls -- cgit v1.2.3 From 2633e3cf5d85276f143cd8c050aaac74097799ba Mon Sep 17 00:00:00 2001 From: Thomas Kemmer Date: Thu, 23 Oct 2014 20:12:41 +0200 Subject: Fix Sphinx automethod --- docs/index.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/index.rst b/docs/index.rst index da86c26..67a3f10 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -85,7 +85,7 @@ of one argument used to retrieve the size of an item's value. except TTLCache.ExpiredError: print('Key %r has expired' % key) - .. automethod:: expire(self, time=None) + .. automethod:: expire(time=None) If `time` is not :const:`None`, remove all items whose time-to-live would have expired by `time`. -- cgit v1.2.3 From f233d17291f0e83a72b8c133ae42be314b903ddc Mon Sep 17 00:00:00 2001 From: Thomas Kemmer Date: Fri, 24 Oct 2014 17:53:08 +0200 Subject: Add v0.6.0 changes. --- Changes | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/Changes b/Changes index 155a2ec..5e58efb 100644 --- a/Changes +++ b/Changes @@ -1,3 +1,13 @@ +0.6.0 2014-10-13 +---------------- + +- Raise `TTLCache.ExpiredError` for expired `TTLCache` items. + +- Support unsynchronized function decorators. 
+ +- Allow `@cachedmethod.cache()` to return None + + 0.5.1 2014-09-25 ---------------- -- cgit v1.2.3 From c22a043fa07c2a39afda54da5027919401b567d3 Mon Sep 17 00:00:00 2001 From: Thomas Kemmer Date: Tue, 4 Nov 2014 20:04:22 +0100 Subject: Update README.rst --- README.rst | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/README.rst b/README.rst index 59b502b..7af31c6 100644 --- a/README.rst +++ b/README.rst @@ -53,11 +53,11 @@ Project Resources :target: https://pypi.python.org/pypi/cachetools/ :alt: Number of PyPI downloads -.. image:: http://img.shields.io/travis/tkem/cachetools.svg?style=flat +.. image:: http://img.shields.io/travis/tkem/cachetools/master.svg?style=flat :target: https://travis-ci.org/tkem/cachetools/ :alt: Travis CI build status -.. image:: http://img.shields.io/coveralls/tkem/cachetools.svg?style=flat +.. image:: http://img.shields.io/coveralls/tkem/cachetools/master.svg?style=flat :target: https://coveralls.io/r/tkem/cachetools :alt: Test coverage -- cgit v1.2.3 From 101513c7817338a0b074bc01a9db93aaa6688e93 Mon Sep 17 00:00:00 2001 From: Thomas Kemmer Date: Wed, 5 Nov 2014 19:56:17 +0100 Subject: Prepare v.0.7.0 --- CHANGES.rst | 74 ++++++++++++++++++++++++++++++++++++++++++++++++++ Changes | 68 ---------------------------------------------- README.rst | 4 +-- cachetools/__init__.py | 2 +- docs/index.rst | 2 +- 5 files changed, 78 insertions(+), 72 deletions(-) create mode 100644 CHANGES.rst delete mode 100644 Changes diff --git a/CHANGES.rst b/CHANGES.rst new file mode 100644 index 0000000..fb073e2 --- /dev/null +++ b/CHANGES.rst @@ -0,0 +1,74 @@ +0.7.0 UNRELEASED +---------------- + +- Move `Changes` to `CHANGES.rst`. + + +0.6.0 2014-10-13 +---------------- + +- Raise ``TTLCache.ExpiredError`` for expired ``TTLCache`` items. + +- Support unsynchronized function decorators. + +- Allow ``@cachedmethod.cache()`` to return None + + +0.5.1 2014-09-25 +---------------- + +- No formatting of ``KeyError`` arguments. 
+ +- Update ``README.rst``. + + +0.5.0 2014-09-23 +---------------- + +- Do not delete expired items in TTLCache.__getitem__(). + +- Add ``@ttl_cache`` function decorator. + +- Fix public ``getsizeof()`` usage. + + +0.4.0 2014-06-16 +---------------- + +- Add ``TTLCache``. + +- Add ``Cache`` base class. + +- Remove ``@cachedmethod`` `lock` parameter. + + +0.3.1 2014-05-07 +---------------- + +- Add proper locking for ``cache_clear()`` and ``cache_info()``. + +- Report `size` in ``cache_info()``. + + +0.3.0 2014-05-06 +---------------- + +- Remove ``@cache`` decorator. + +- Add ``size``, ``getsizeof`` members. + +- Add ``@cachedmethod`` decorator. + + +0.2.0 2014-04-02 +---------------- + +- Add ``@cache`` decorator. + +- Update documentation. + + +0.1.0 2014-03-27 +---------------- + +- Initial release. diff --git a/Changes b/Changes deleted file mode 100644 index 5e58efb..0000000 --- a/Changes +++ /dev/null @@ -1,68 +0,0 @@ -0.6.0 2014-10-13 ----------------- - -- Raise `TTLCache.ExpiredError` for expired `TTLCache` items. - -- Support unsynchronized function decorators. - -- Allow `@cachedmethod.cache()` to return None - - -0.5.1 2014-09-25 ----------------- - -- No formatting of `KeyError` arguments. - -- Update `README.rst`. - - -0.5.0 2014-09-23 ----------------- - -- Do not delete expired items in TTLCache.__getitem__(). - -- Add `@ttl_cache` function decorator. - -- Fix public `getsizeof()` usage. - - -0.4.0 2014-06-16 ----------------- - -- Add `TTLCache`. - -- Add `Cache` base class. - -- Remove `@cachedmethod` `lock` parameter. - - -0.3.1 2014-05-07 ----------------- - -- Add proper locking for `cache_clear()` and `cache_info()`. - -- Report `size` in `cache_info()`. - - -0.3.0 2014-05-06 ----------------- - -- Remove `@cache` decorator. - -- Add `size`, `getsizeof` members. - -- Add `@cachedmethod` decorator. - - -0.2.0 2014-04-02 ----------------- - -- Add `@cache` decorator. - -- Update documentation. 
- - -0.1.0 2014-03-27 ----------------- - -- Initial release. diff --git a/README.rst b/README.rst index 7af31c6..3b69c69 100644 --- a/README.rst +++ b/README.rst @@ -29,7 +29,7 @@ size is the sum of the size of its items, and an item's size in general is a property or function of its value, e.g. the result of ``sys.getsizeof``, or ``len`` for string and sequence values. -This module provides various cache implementations based on different +This module provides multiple cache implementations based on different cache algorithms, as well as decorators for easily memoizing function and method calls. @@ -83,5 +83,5 @@ Licensed under the `MIT License`_. .. _Documentation: http://pythonhosted.org/cachetools/ .. _Issue Tracker: https://github.com/tkem/cachetools/issues/ .. _Source Code: https://github.com/tkem/cachetools/ -.. _Change Log: http://raw.github.com/tkem/cachetools/master/Changes +.. _Change Log: https://github.com/tkem/cachetools/blob/master/CHANGES.rst .. _MIT License: http://raw.github.com/tkem/cachetools/master/LICENSE diff --git a/cachetools/__init__.py b/cachetools/__init__.py index f789ab9..1da08fc 100644 --- a/cachetools/__init__.py +++ b/cachetools/__init__.py @@ -14,4 +14,4 @@ __all__ = ( 'cachedmethod' ) -__version__ = '0.6.0' +__version__ = '0.7.0' diff --git a/docs/index.rst b/docs/index.rst index 67a3f10..a22e0b4 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -30,7 +30,7 @@ size is the sum of the size of its items, and an item's size in general is a property or function of its value, e.g. the result of :func:`sys.getsizeof`, or :func:`len` for string and sequence values. -This module provides various cache implementations based on different +This module provides multiple cache implementations based on different cache algorithms, as well as decorators for easily memoizing function and method calls. 
-- cgit v1.2.3 From e626ad022e89811dbeb33720a011a53840562e18 Mon Sep 17 00:00:00 2001 From: Thomas Kemmer Date: Mon, 10 Nov 2014 06:15:33 +0100 Subject: Fix #22: Remove NullContext implementation based on ExitStack. --- CHANGES.rst | 4 +++- cachetools/decorators.py | 17 +++++++---------- 2 files changed, 10 insertions(+), 11 deletions(-) diff --git a/CHANGES.rst b/CHANGES.rst index fb073e2..7aced8b 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -1,7 +1,9 @@ 0.7.0 UNRELEASED ---------------- -- Move `Changes` to `CHANGES.rst`. +- Remove ``NullContext`` implementation based on ``ExitStack``. + +- Rename `Changes` to `CHANGES.rst`. 0.6.0 2014-10-13 diff --git a/cachetools/decorators.py b/cachetools/decorators.py index 7ad617d..44ffcb5 100644 --- a/cachetools/decorators.py +++ b/cachetools/decorators.py @@ -1,18 +1,15 @@ import collections -import contextlib # noqa import functools -try: - from contextlib import ExitStack as NullContext # Python 3.3 -except ImportError: - class NullContext: - def __enter__(self): - pass +CacheInfo = collections.namedtuple('CacheInfo', 'hits misses maxsize currsize') - def __exit__(self, exc_type, exc_val, exc_tb): - pass -CacheInfo = collections.namedtuple('CacheInfo', 'hits misses maxsize currsize') +class NullContext: + def __enter__(self): + pass + + def __exit__(self, exc_type, exc_val, exc_tb): + pass nullcontext = NullContext() -- cgit v1.2.3 From 07646b77433add18373b1b797ba1ddff6416ecb2 Mon Sep 17 00:00:00 2001 From: Thomas Kemmer Date: Wed, 5 Nov 2014 20:27:14 +0100 Subject: Fix #14: LRU/TTLCache refactoring. 
--- CHANGES.rst | 2 + cachetools/link.py | 10 ---- cachetools/lrucache.py | 66 ++++++++++++++----------- cachetools/ttlcache.py | 128 ++++++++++++++++++++++++++++++------------------- tests/__init__.py | 53 ++------------------ tests/test_lrucache.py | 51 +++++++++++++++++++- tests/test_ttlcache.py | 77 ++++++++++++++++++++++------- 7 files changed, 234 insertions(+), 153 deletions(-) delete mode 100644 cachetools/link.py diff --git a/CHANGES.rst b/CHANGES.rst index 7aced8b..d68bb9a 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -1,6 +1,8 @@ 0.7.0 UNRELEASED ---------------- +- Refactor ``LRUCache``, ``TTLCache``. + - Remove ``NullContext`` implementation based on ``ExitStack``. - Rename `Changes` to `CHANGES.rst`. diff --git a/cachetools/link.py b/cachetools/link.py deleted file mode 100644 index 122735c..0000000 --- a/cachetools/link.py +++ /dev/null @@ -1,10 +0,0 @@ -class Link(object): - __slots__ = 'prev', 'next', 'data' - - def unlink(self): - next = self.next - prev = self.prev - prev.next = next - next.prev = prev - del self.next - del self.prev diff --git a/cachetools/lrucache.py b/cachetools/lrucache.py index defeed1..2d68077 100644 --- a/cachetools/lrucache.py +++ b/cachetools/lrucache.py @@ -1,9 +1,19 @@ from .cache import Cache from .decorators import cachedfunc -from .link import Link from .lock import RLock +class Link(object): + + __slots__ = 'key', 'value', 'prev', 'next' + + def unlink(self): + next = self.next + prev = self.prev + prev.next = next + next.prev = prev + + class LRUCache(Cache): """Least Recently Used (LRU) cache implementation. 
@@ -13,55 +23,55 @@ class LRUCache(Cache): """ def __init__(self, maxsize, getsizeof=None): - if getsizeof is None: - Cache.__init__(self, maxsize) - else: - Cache.__init__(self, maxsize, lambda e: getsizeof(e[0])) + if getsizeof is not None: + Cache.__init__(self, maxsize, lambda link: getsizeof(link.value)) self.getsizeof = getsizeof - root = Link() + else: + Cache.__init__(self, maxsize) + self.__root = root = Link() root.prev = root.next = root - self.__root = root def __repr__(self, cache_getitem=Cache.__getitem__): + # prevent item reordering return '%s(%r, maxsize=%d, currsize=%d)' % ( self.__class__.__name__, - [(key, cache_getitem(self, key)[0]) for key in self], + [(key, cache_getitem(self, key).value) for key in self], self.maxsize, self.currsize, ) def __getitem__(self, key, cache_getitem=Cache.__getitem__): - value, link = cache_getitem(self, key) - root = self.__root - link.prev.next = link.next - link.next.prev = link.prev + link = cache_getitem(self, key) + next = link.next + prev = link.prev + prev.next = next + next.prev = prev + link.next = root = self.__root link.prev = tail = root.prev - link.next = root tail.next = root.prev = link - return value + return link.value def __setitem__(self, key, value, cache_getitem=Cache.__getitem__, cache_setitem=Cache.__setitem__): try: - _, link = cache_getitem(self, key) + oldlink = cache_getitem(self, key) except KeyError: - link = Link() - cache_setitem(self, key, (value, link)) - try: - link.prev.next = link.next - link.next.prev = link.prev - except AttributeError: - link.data = key - root = self.__root + oldlink = None + link = Link() + link.key = key + link.value = value + cache_setitem(self, key, link) + if oldlink: + oldlink.unlink() + link.next = root = self.__root link.prev = tail = root.prev - link.next = root tail.next = root.prev = link def __delitem__(self, key, cache_getitem=Cache.__getitem__, cache_delitem=Cache.__delitem__): - _, link = cache_getitem(self, key) + link = cache_getitem(self, 
key) cache_delitem(self, key) link.unlink() @@ -71,8 +81,10 @@ class LRUCache(Cache): link = root.next if link is root: raise KeyError('cache is empty') - key = link.data - return (key, self.pop(key)) + key = link.key + Cache.__delitem__(self, key) + link.unlink() + return (key, link.value) def lru_cache(maxsize=128, typed=False, getsizeof=None, lock=RLock): diff --git a/cachetools/ttlcache.py b/cachetools/ttlcache.py index 56ff553..b6dc2f0 100644 --- a/cachetools/ttlcache.py +++ b/cachetools/ttlcache.py @@ -1,14 +1,31 @@ -from .lrucache import LRUCache +from .cache import Cache from .decorators import cachedfunc -from .link import Link from .lock import RLock import time -_marker = object() +class Link(object): -class TTLCache(LRUCache): + __slots__ = ( + 'key', 'value', 'expire', + 'ttl_prev', 'ttl_next', + 'lru_prev', 'lru_next' + ) + + def unlink(self): + ttl_next = self.ttl_next + ttl_prev = self.ttl_prev + ttl_prev.ttl_next = ttl_next + ttl_next.ttl_prev = ttl_prev + + lru_next = self.lru_next + lru_prev = self.lru_prev + lru_prev.lru_next = lru_next + lru_next.lru_prev = lru_prev + + +class TTLCache(Cache): """Cache implementation with per-item time-to-live (TTL) value. This class associates a time-to-live value with each item. 
Items @@ -34,82 +51,93 @@ class TTLCache(LRUCache): def __init__(self, maxsize, ttl, timer=time.time, getsizeof=None): if getsizeof is None: - LRUCache.__init__(self, maxsize) + Cache.__init__(self, maxsize) else: - LRUCache.__init__(self, maxsize, lambda e: getsizeof(e[0])) + Cache.__init__(self, maxsize, lambda e: getsizeof(e.value)) self.getsizeof = getsizeof - root = Link() - root.prev = root.next = root - self.__root = root + self.__root = root = Link() + root.ttl_prev = root.ttl_next = root + root.lru_prev = root.lru_next = root self.__timer = timer self.__ttl = ttl - def __repr__(self, cache_getitem=LRUCache.__getitem__): + def __repr__(self, cache_getitem=Cache.__getitem__): + # prevent item reordering/expiration return '%s(%r, maxsize=%d, currsize=%d)' % ( self.__class__.__name__, - [(key, cache_getitem(self, key)[0]) for key in self], + [(key, cache_getitem(self, key).value) for key in self], self.maxsize, self.currsize, ) - def __getitem__(self, key, cache_getitem=LRUCache.__getitem__): - value, link = cache_getitem(self, key) - if link.data[1] < self.__timer(): + def __getitem__(self, key, cache_getitem=Cache.__getitem__): + link = cache_getitem(self, key) + if link.expire < self.__timer(): raise TTLCache.ExpiredError(key) - return value + next = link.lru_next + prev = link.lru_prev + prev.lru_next = next + next.lru_prev = prev + link.lru_next = root = self.__root + link.lru_prev = tail = root.lru_prev + tail.lru_next = root.lru_prev = link + return link.value def __setitem__(self, key, value, - cache_getitem=LRUCache.__getitem__, - cache_setitem=LRUCache.__setitem__, - cache_delitem=LRUCache.__delitem__): + cache_getitem=Cache.__getitem__, + cache_setitem=Cache.__setitem__): time = self.__timer() self.expire(time) try: - _, link = cache_getitem(self, key) + oldlink = cache_getitem(self, key) except KeyError: - link = Link() - cache_setitem(self, key, (value, link)) - try: - link.prev.next = link.next - link.next.prev = link.prev - except 
AttributeError: - pass + oldlink = None + link = Link() + link.key = key + link.value = value + link.expire = time + self.__ttl + cache_setitem(self, key, link) + if oldlink: + oldlink.unlink() root = self.__root - link.data = (key, time + self.__ttl) - link.prev = tail = root.prev - link.next = root - tail.next = root.prev = link + link.ttl_next = root + link.ttl_prev = tail = root.ttl_prev + tail.ttl_next = root.ttl_prev = link + link.lru_next = root + link.lru_prev = tail = root.lru_prev + tail.lru_next = root.lru_prev = link def __delitem__(self, key, - cache_getitem=LRUCache.__getitem__, - cache_delitem=LRUCache.__delitem__): - _, link = cache_getitem(self, key) + cache_getitem=Cache.__getitem__, + cache_delitem=Cache.__delitem__): + link = cache_getitem(self, key) cache_delitem(self, key) link.unlink() self.expire() - def expire(self, time=None, cache_delitem=LRUCache.__delitem__): + def expire(self, time=None): """Remove expired items from the cache.""" if time is None: time = self.__timer() root = self.__root - head = root.next - while head is not root and head.data[1] < time: - cache_delitem(self, head.data[0]) - head.next.prev = root - head = root.next = head.next - - def pop(self, key, default=_marker): - try: - value, link = LRUCache.__getitem__(self, key) - except KeyError: - if default is _marker: - raise - return default - LRUCache.__delitem__(self, key) + head = root.ttl_next + cache_delitem = Cache.__delitem__ + while head is not root and head.expire < time: + cache_delitem(self, head.key) + next = head.ttl_next + head.unlink() + head = next + + def popitem(self): + """Remove and return the `(key, value)` pair least recently used.""" + root = self.__root + link = root.lru_next + if link is root: + raise KeyError('cache is empty') + key = link.key + Cache.__delitem__(self, key) link.unlink() - self.expire() - return value + return (key, link.value) @property def timer(self): diff --git a/tests/__init__.py b/tests/__init__.py index bd6bc2d..4f63c6f 
100644 --- a/tests/__init__.py +++ b/tests/__init__.py @@ -11,6 +11,7 @@ class CacheTestMixin(object): self.assertEqual(1, cache.getsizeof(None)) self.assertEqual(1, cache.getsizeof('')) self.assertEqual(1, cache.getsizeof(0)) + self.assertTrue(repr(cache).startswith(cache.__class__.__name__)) def test_insert(self): cache = self.make_cache(maxsize=2) @@ -132,57 +133,13 @@ class CacheTestMixin(object): self.assertNotIn(2, cache) with self.assertRaises(ValueError): - cache[4] = 4 + cache[3] = 4 self.assertEqual(1, len(cache)) self.assertEqual(3, cache.currsize) self.assertEqual(3, cache[3]) - -class LRUCacheTestMixin(CacheTestMixin): - - def test_lru_insert(self): - cache = self.make_cache(maxsize=2) - - cache[1] = 1 - cache[2] = 2 - cache[3] = 3 - - self.assertEqual(len(cache), 2) - self.assertEqual(cache[2], 2) - self.assertEqual(cache[3], 3) - self.assertNotIn(1, cache) - - cache[2] - cache[4] = 4 - self.assertEqual(len(cache), 2) - self.assertEqual(cache[2], 2) - self.assertEqual(cache[4], 4) - self.assertNotIn(3, cache) - - cache[5] = 5 - self.assertEqual(len(cache), 2) - self.assertEqual(cache[4], 4) - self.assertEqual(cache[5], 5) - self.assertNotIn(2, cache) - - def test_lru_getsizeof(self): - cache = self.make_cache(maxsize=3, getsizeof=lambda x: x) - - cache[1] = 1 - cache[2] = 2 - - self.assertEqual(len(cache), 2) - self.assertEqual(cache[1], 1) - self.assertEqual(cache[2], 2) - - cache[3] = 3 - - self.assertEqual(len(cache), 1) - self.assertEqual(cache[3], 3) - self.assertNotIn(1, cache) - self.assertNotIn(2, cache) - with self.assertRaises(ValueError): cache[4] = 4 - self.assertEqual(len(cache), 1) - self.assertEqual(cache[3], 3) + self.assertEqual(1, len(cache)) + self.assertEqual(3, cache.currsize) + self.assertEqual(3, cache[3]) diff --git a/tests/test_lrucache.py b/tests/test_lrucache.py index 9bedcb5..9f8112e 100644 --- a/tests/test_lrucache.py +++ b/tests/test_lrucache.py @@ -1,6 +1,6 @@ import unittest -from . import LRUCacheTestMixin +from . 
import CacheTestMixin from cachetools import LRUCache, lru_cache @@ -14,11 +14,58 @@ def cached_typed(n): return n -class LRUCacheTest(unittest.TestCase, LRUCacheTestMixin): +class LRUCacheTest(unittest.TestCase, CacheTestMixin): def make_cache(self, maxsize, getsizeof=None): return LRUCache(maxsize, getsizeof) + def test_lru_insert(self): + cache = self.make_cache(maxsize=2) + + cache[1] = 1 + cache[2] = 2 + cache[3] = 3 + + self.assertEqual(len(cache), 2) + self.assertEqual(cache[2], 2) + self.assertEqual(cache[3], 3) + self.assertNotIn(1, cache) + + cache[2] + cache[4] = 4 + self.assertEqual(len(cache), 2) + self.assertEqual(cache[2], 2) + self.assertEqual(cache[4], 4) + self.assertNotIn(3, cache) + + cache[5] = 5 + self.assertEqual(len(cache), 2) + self.assertEqual(cache[4], 4) + self.assertEqual(cache[5], 5) + self.assertNotIn(2, cache) + + def test_lru_getsizeof(self): + cache = self.make_cache(maxsize=3, getsizeof=lambda x: x) + + cache[1] = 1 + cache[2] = 2 + + self.assertEqual(len(cache), 2) + self.assertEqual(cache[1], 1) + self.assertEqual(cache[2], 2) + + cache[3] = 3 + + self.assertEqual(len(cache), 1) + self.assertEqual(cache[3], 3) + self.assertNotIn(1, cache) + self.assertNotIn(2, cache) + + with self.assertRaises(ValueError): + cache[4] = 4 + self.assertEqual(len(cache), 1) + self.assertEqual(cache[3], 3) + def test_decorator(self): self.assertEqual(cached.cache_info(), (0, 0, 2, 0)) self.assertEqual(cached(1), 1) diff --git a/tests/test_ttlcache.py b/tests/test_ttlcache.py index c89afe8..25477e2 100644 --- a/tests/test_ttlcache.py +++ b/tests/test_ttlcache.py @@ -1,6 +1,6 @@ import unittest -from . import LRUCacheTestMixin +from . 
import CacheTestMixin from cachetools import TTLCache, ttl_cache @@ -14,26 +14,24 @@ def cached_typed(n): return n -class TTLCacheTest(unittest.TestCase, LRUCacheTestMixin): +class Timer: + def __init__(self): + self.__time = 0 - def make_cache(self, maxsize, getsizeof=None): - return TTLCache(maxsize, ttl=0, timer=lambda: 0, getsizeof=getsizeof) + def __call__(self): + return self.__time - def make_ttl_cache(self, maxsize, ttl): - class Timer: - def __init__(self): - self.__time = 0 + def inc(self): + self.__time = self.__time + 1 - def __call__(self): - return self.__time - def inc(self): - self.__time = self.__time + 1 +class TTLCacheTest(unittest.TestCase, CacheTestMixin): - return TTLCache(maxsize, ttl, timer=Timer()) + def make_cache(self, maxsize, getsizeof=None, ttl=0): + return TTLCache(maxsize, ttl, timer=Timer(), getsizeof=getsizeof) def test_ttl_insert(self): - cache = self.make_ttl_cache(maxsize=2, ttl=2) + cache = self.make_cache(maxsize=2, ttl=2) self.assertEqual(cache.ttl, 2) cache[1] = 1 @@ -58,7 +56,7 @@ class TTLCacheTest(unittest.TestCase, LRUCacheTestMixin): self.assertEqual(3, cache[3]) def test_ttl_expire(self): - cache = self.make_ttl_cache(maxsize=3, ttl=0) + cache = self.make_cache(maxsize=3, ttl=0) self.assertEqual(cache.ttl, 0) cache[1] = 1 @@ -91,7 +89,7 @@ class TTLCacheTest(unittest.TestCase, LRUCacheTestMixin): self.assertNotIn(3, cache) def test_ttl_tuple_key(self): - cache = self.make_ttl_cache(maxsize=1, ttl=0) + cache = self.make_cache(maxsize=1, ttl=0) cache[(1, 2, 3)] = 42 self.assertEqual(42, cache[(1, 2, 3)]) @@ -101,6 +99,53 @@ class TTLCacheTest(unittest.TestCase, LRUCacheTestMixin): cache.expire() self.assertNotIn((1, 2, 3), cache) + def test_lru_insert(self): + cache = self.make_cache(maxsize=2) + + cache[1] = 1 + cache[2] = 2 + cache[3] = 3 + + self.assertEqual(len(cache), 2) + self.assertEqual(cache[2], 2) + self.assertEqual(cache[3], 3) + self.assertNotIn(1, cache) + + cache[2] + cache[4] = 4 + 
self.assertEqual(len(cache), 2) + self.assertEqual(cache[2], 2) + self.assertEqual(cache[4], 4) + self.assertNotIn(3, cache) + + cache[5] = 5 + self.assertEqual(len(cache), 2) + self.assertEqual(cache[4], 4) + self.assertEqual(cache[5], 5) + self.assertNotIn(2, cache) + + def test_lru_getsizeof(self): + cache = self.make_cache(maxsize=3, getsizeof=lambda x: x) + + cache[1] = 1 + cache[2] = 2 + + self.assertEqual(len(cache), 2) + self.assertEqual(cache[1], 1) + self.assertEqual(cache[2], 2) + + cache[3] = 3 + + self.assertEqual(len(cache), 1) + self.assertEqual(cache[3], 3) + self.assertNotIn(1, cache) + self.assertNotIn(2, cache) + + with self.assertRaises(ValueError): + cache[4] = 4 + self.assertEqual(len(cache), 1) + self.assertEqual(cache[3], 3) + def test_decorator(self): self.assertEqual(cached.cache_info(), (0, 0, 2, 0)) self.assertEqual(cached(1), 1) -- cgit v1.2.3 From a49f49f2fb5a2d82cb2c1585b865dce390f7f174 Mon Sep 17 00:00:00 2001 From: Thomas Kemmer Date: Mon, 10 Nov 2014 16:47:47 +0100 Subject: Fix #20: Move TTLCache.ExpiredError to module. --- CHANGES.rst | 2 ++ cachetools/__init__.py | 4 +++- cachetools/errors.py | 7 +++++++ cachetools/ttlcache.py | 18 +++++++++--------- docs/index.rst | 22 ++++++++++++---------- tests/test_ttlcache.py | 8 ++++---- 6 files changed, 37 insertions(+), 24 deletions(-) create mode 100644 cachetools/errors.py diff --git a/CHANGES.rst b/CHANGES.rst index d68bb9a..c5f4b84 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -1,6 +1,8 @@ 0.7.0 UNRELEASED ---------------- +- Move ``TTLCache.ExpiredError`` to module level. + - Refactor ``LRUCache``, ``TTLCache``. - Remove ``NullContext`` implementation based on ``ExitStack``. 
diff --git a/cachetools/__init__.py b/cachetools/__init__.py index 1da08fc..4c7e590 100644 --- a/cachetools/__init__.py +++ b/cachetools/__init__.py @@ -6,12 +6,14 @@ from .lfucache import LFUCache, lfu_cache from .lrucache import LRUCache, lru_cache from .ttlcache import TTLCache, ttl_cache from .decorators import cachedmethod +from .errors import ExpiredError __all__ = ( 'Cache', 'RRCache', 'LFUCache', 'LRUCache', 'TTLCache', 'rr_cache', 'lfu_cache', 'lru_cache', 'ttl_cache', - 'cachedmethod' + 'cachedmethod', + 'ExpiredError' ) __version__ = '0.7.0' diff --git a/cachetools/errors.py b/cachetools/errors.py new file mode 100644 index 0000000..552137b --- /dev/null +++ b/cachetools/errors.py @@ -0,0 +1,7 @@ +class ExpiredError(KeyError): + """Raised when a cached item's time-to-live has expired. + + This is a subclass of :exc:`KeyError`. + + """ + pass diff --git a/cachetools/ttlcache.py b/cachetools/ttlcache.py index b6dc2f0..c0b6835 100644 --- a/cachetools/ttlcache.py +++ b/cachetools/ttlcache.py @@ -1,5 +1,6 @@ from .cache import Cache from .decorators import cachedfunc +from .errors import ExpiredError from .lock import RLock import time @@ -41,13 +42,7 @@ class TTLCache(Cache): """ - class ExpiredError(KeyError): - """Raised when a cached item's time-to-live has expired. - - This is a subclass of :exc:`KeyError`. - - """ - pass + ExpiredError = ExpiredError # deprecated def __init__(self, maxsize, ttl, timer=time.time, getsizeof=None): if getsizeof is None: @@ -73,7 +68,7 @@ class TTLCache(Cache): def __getitem__(self, key, cache_getitem=Cache.__getitem__): link = cache_getitem(self, key) if link.expire < self.__timer(): - raise TTLCache.ExpiredError(key) + raise ExpiredError(key) next = link.lru_next prev = link.lru_prev prev.lru_next = next @@ -116,7 +111,12 @@ class TTLCache(Cache): self.expire() def expire(self, time=None): - """Remove expired items from the cache.""" + """Remove expired items from the cache. 
+ + If `time` is not :const:`None`, remove all items whose + time-to-live would have expired by `time`. + + """ if time is None: time = self.__timer() root = self.__root diff --git a/docs/index.rst b/docs/index.rst index a22e0b4..b587f05 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -66,13 +66,14 @@ of one argument used to retrieve the size of an item's value. :members: .. autoclass:: TTLCache - :members: ExpiredError, timer, ttl + :members: + :exclude-members: ExpiredError Note that a cache item may expire at *any* time, so iterating over - the items of a :class:`TTLCache` may raise - :class:`TTLCache.ExpiredError` unexpectedly:: + the items of a :class:`TTLCache` may raise :exc:`ExpiredError` + unexpectedly:: - from cachetools import TTLCache + from cachetools import TTLCache, ExpiredError import time cache = TTLCache(maxsize=100, ttl=1) @@ -82,14 +83,9 @@ of one argument used to retrieve the size of an item's value. for key in cache: try: print(cache[key]) - except TTLCache.ExpiredError: + except ExpiredError: print('Key %r has expired' % key) - .. automethod:: expire(time=None) - - If `time` is not :const:`None`, remove all items whose - time-to-live would have expired by `time`. - Function Decorators ------------------------------------------------------------------------ @@ -189,6 +185,12 @@ Method Decorators print("PEP #1: %s" % peps.get(1)) +Exception Classes +------------------------------------------------------------------------ + +.. autoexception:: ExpiredError + + .. _mutable: http://docs.python.org/dev/glossary.html#term-mutable .. _mapping: http://docs.python.org/dev/glossary.html#term-mapping .. _cache algorithm: http://en.wikipedia.org/wiki/Cache_algorithms diff --git a/tests/test_ttlcache.py b/tests/test_ttlcache.py index 25477e2..a26d3b6 100644 --- a/tests/test_ttlcache.py +++ b/tests/test_ttlcache.py @@ -1,7 +1,7 @@ import unittest from . 
import CacheTestMixin -from cachetools import TTLCache, ttl_cache +from cachetools import ExpiredError, TTLCache, ttl_cache @ttl_cache(maxsize=2) @@ -62,12 +62,12 @@ class TTLCacheTest(unittest.TestCase, CacheTestMixin): cache[1] = 1 self.assertEqual(1, cache[1]) cache.timer.inc() - with self.assertRaises(TTLCache.ExpiredError): + with self.assertRaises(ExpiredError): cache[1] cache[2] = 2 self.assertEqual(2, cache[2]) cache.timer.inc() - with self.assertRaises(TTLCache.ExpiredError): + with self.assertRaises(ExpiredError): cache[2] cache[3] = 3 self.assertEqual(3, cache[3]) @@ -94,7 +94,7 @@ class TTLCacheTest(unittest.TestCase, CacheTestMixin): cache[(1, 2, 3)] = 42 self.assertEqual(42, cache[(1, 2, 3)]) cache.timer.inc() - with self.assertRaises(TTLCache.ExpiredError): + with self.assertRaises(ExpiredError): cache[(1, 2, 3)] cache.expire() self.assertNotIn((1, 2, 3), cache) -- cgit v1.2.3 From 6c829a41d5e0b78f40bcc77ef35f17ad443cc75a Mon Sep 17 00:00:00 2001 From: Thomas Kemmer Date: Mon, 10 Nov 2014 16:24:06 +0100 Subject: Fix #21: Add choice argument to RRCache constructor. --- CHANGES.rst | 2 + cachetools/rrcache.py | 16 ++++-- docs/index.rst | 2 +- tests/__init__.py | 61 +++++++++++++++------ tests/test_cache.py | 4 +- tests/test_lfucache.py | 47 ++++------------ tests/test_lrucache.py | 48 ++++------------- tests/test_rrcache.py | 56 +++++++++---------- tests/test_ttlcache.py | 142 ++++++++++++++++--------------------------------- 9 files changed, 153 insertions(+), 225 deletions(-) diff --git a/CHANGES.rst b/CHANGES.rst index c5f4b84..bb17dcf 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -1,6 +1,8 @@ 0.7.0 UNRELEASED ---------------- +- Add `choice` argument to ``RRCache`` constructor. + - Move ``TTLCache.ExpiredError`` to module level. - Refactor ``LRUCache``, ``TTLCache``. 
diff --git a/cachetools/rrcache.py b/cachetools/rrcache.py index d2494c7..5116775 100644 --- a/cachetools/rrcache.py +++ b/cachetools/rrcache.py @@ -11,21 +11,31 @@ class RRCache(Cache): This class randomly selects candidate items and discards them to make space when necessary. + By default, items are selected from the list of cache keys using + :func:`random.choice`. The optional argument `choice` may specify + an alternative function that returns an arbitrary element from a + non-empty sequence. + """ + def __init__(self, maxsize, choice=random.choice, getsizeof=None): + Cache.__init__(self, maxsize, getsizeof=getsizeof) + self.__choice = choice + def popitem(self): """Remove and return a random `(key, value)` pair.""" try: - key = random.choice(list(self)) + key = self.__choice(list(self)) except IndexError: raise KeyError('cache is empty') return (key, self.pop(key)) -def rr_cache(maxsize=128, typed=False, getsizeof=None, lock=RLock): +def rr_cache(maxsize=128, choice=random.choice, typed=False, getsizeof=None, + lock=RLock): """Decorator to wrap a function with a memoizing callable that saves up to `maxsize` results based on a Random Replacement (RR) algorithm. """ - return cachedfunc(RRCache(maxsize, getsizeof), typed, lock) + return cachedfunc(RRCache(maxsize, choice, getsizeof), typed, lock) diff --git a/docs/index.rst b/docs/index.rst index b587f05..ded26e2 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -119,7 +119,7 @@ documentation for details. Unlike :func:`functools.lru_cache`, setting `maxsize` to zero or :const:`None` is not supported. -.. decorator:: rr_cache(maxsize=128, typed=False, getsizeof=None, lock=threading.RLock) +.. 
decorator:: rr_cache(maxsize=128, choice=random.choice, typed=False, getsizeof=None, lock=threading.RLock) Decorator that wraps a function with a memoizing callable that saves up to `maxsize` results based on a Random Replacement (RR) diff --git a/tests/__init__.py b/tests/__init__.py index 4f63c6f..2a1c7c2 100644 --- a/tests/__init__.py +++ b/tests/__init__.py @@ -1,10 +1,10 @@ class CacheTestMixin(object): - def make_cache(self, maxsize, getsizeof=None): + def cache(self, maxsize, getsizeof=None): raise NotImplementedError - def test_defaults(self): - cache = self.make_cache(maxsize=1) + def test_cache_defaults(self): + cache = self.cache(maxsize=1) self.assertEqual(0, len(cache)) self.assertEqual(1, cache.maxsize) self.assertEqual(0, cache.currsize) @@ -13,8 +13,8 @@ class CacheTestMixin(object): self.assertEqual(1, cache.getsizeof(0)) self.assertTrue(repr(cache).startswith(cache.__class__.__name__)) - def test_insert(self): - cache = self.make_cache(maxsize=2) + def test_cache_insert(self): + cache = self.cache(maxsize=2) cache.update({1: 1, 2: 2}) self.assertEqual(2, len(cache)) @@ -31,8 +31,8 @@ class CacheTestMixin(object): self.assertEqual(4, cache[4]) self.assertTrue(1 in cache or 2 in cache or 3 in cache) - def test_update(self): - cache = self.make_cache(maxsize=2) + def test_cache_update(self): + cache = self.cache(maxsize=2) cache.update({1: 1, 2: 2}) self.assertEqual(2, len(cache)) @@ -49,8 +49,8 @@ class CacheTestMixin(object): self.assertEqual('a', cache[1]) self.assertEqual('b', cache[2]) - def test_delete(self): - cache = self.make_cache(maxsize=2) + def test_cache_delete(self): + cache = self.cache(maxsize=2) cache.update({1: 1, 2: 2}) self.assertEqual(2, len(cache)) @@ -67,8 +67,8 @@ class CacheTestMixin(object): self.assertNotIn(1, cache) self.assertNotIn(2, cache) - def test_pop(self): - cache = self.make_cache(maxsize=2) + def test_cache_pop(self): + cache = self.cache(maxsize=2) cache.update({1: 1, 2: 2}) self.assertEqual(2, cache.pop(2)) @@ 
-87,8 +87,8 @@ class CacheTestMixin(object): self.assertEqual(None, cache.pop(1, None)) self.assertEqual(None, cache.pop(0, None)) - def test_popitem(self): - cache = self.make_cache(maxsize=2) + def test_cache_popitem(self): + cache = self.cache(maxsize=2) cache.update({1: 1, 2: 2}) self.assertIn(cache.pop(1), {1: 1, 2: 2}) @@ -99,8 +99,8 @@ class CacheTestMixin(object): with self.assertRaises(KeyError): cache.popitem() - def test_getsizeof(self): - cache = self.make_cache(maxsize=3, getsizeof=lambda x: x) + def test_cache_getsizeof(self): + cache = self.cache(maxsize=3, getsizeof=lambda x: x) self.assertEqual(3, cache.maxsize) self.assertEqual(0, cache.currsize) self.assertEqual(1, cache.getsizeof(1)) @@ -143,3 +143,34 @@ class CacheTestMixin(object): self.assertEqual(1, len(cache)) self.assertEqual(3, cache.currsize) self.assertEqual(3, cache[3]) + + +class DecoratorTestMixin(object): + + def decorator(self, maxsize, typed=False, lock=None): + raise NotImplementedError + + def test_decorator(self): + cached = self.decorator(maxsize=2)(lambda n: n) + + self.assertEqual(cached(1), 1) + self.assertEqual(cached.cache_info(), (0, 1, 2, 1)) + self.assertEqual(cached(1), 1) + self.assertEqual(cached.cache_info(), (1, 1, 2, 1)) + self.assertEqual(cached(1.0), 1.0) + self.assertEqual(cached.cache_info(), (2, 1, 2, 1)) + + cached.cache_clear() + self.assertEqual(cached(1), 1) + self.assertEqual(cached.cache_info(), (2, 2, 2, 1)) + + def test_typed_decorator(self): + cached = self.decorator(maxsize=2, typed=True)(lambda n: n) + self.assertEqual(cached(1), 1) + self.assertEqual(cached.cache_info(), (0, 1, 2, 1)) + self.assertEqual(cached(1), 1) + self.assertEqual(cached.cache_info(), (1, 1, 2, 1)) + self.assertEqual(cached(1.0), 1.0) + self.assertEqual(cached.cache_info(), (1, 2, 2, 2)) + self.assertEqual(cached(1.0), 1.0) + self.assertEqual(cached.cache_info(), (2, 2, 2, 2)) diff --git a/tests/test_cache.py b/tests/test_cache.py index 60e9220..d68f676 100644 --- 
a/tests/test_cache.py +++ b/tests/test_cache.py @@ -6,5 +6,5 @@ from cachetools import Cache class CacheTest(unittest.TestCase, CacheTestMixin): - def make_cache(self, maxsize, getsizeof=None): - return Cache(maxsize, getsizeof) + def cache(self, maxsize, getsizeof=None): + return Cache(maxsize, getsizeof=getsizeof) diff --git a/tests/test_lfucache.py b/tests/test_lfucache.py index 3f66681..c2e1bb7 100644 --- a/tests/test_lfucache.py +++ b/tests/test_lfucache.py @@ -1,26 +1,19 @@ import unittest -from . import CacheTestMixin +from . import CacheTestMixin, DecoratorTestMixin from cachetools import LFUCache, lfu_cache -@lfu_cache(maxsize=2) -def cached(n): - return n +class LFUCacheTest(unittest.TestCase, CacheTestMixin, DecoratorTestMixin): - -@lfu_cache(maxsize=2, typed=True, lock=None) -def cached_typed(n): - return n - - -class LFUCacheTest(unittest.TestCase, CacheTestMixin): - - def make_cache(self, maxsize, getsizeof=None): + def cache(self, maxsize, getsizeof=None): return LFUCache(maxsize, getsizeof) - def test_lfu_insert(self): - cache = self.make_cache(maxsize=2) + def decorator(self, maxsize, typed=False, lock=None): + return lfu_cache(maxsize, typed=typed, lock=lock) + + def test_lfu(self): + cache = self.cache(maxsize=2) cache[1] = 1 cache[1] @@ -38,7 +31,7 @@ class LFUCacheTest(unittest.TestCase, CacheTestMixin): self.assertEqual(cache[1], 1) def test_lfu_getsizeof(self): - cache = self.make_cache(maxsize=3, getsizeof=lambda x: x) + cache = self.cache(maxsize=3, getsizeof=lambda x: x) cache[1] = 1 cache[2] = 2 @@ -58,25 +51,3 @@ class LFUCacheTest(unittest.TestCase, CacheTestMixin): cache[4] = 4 self.assertEqual(len(cache), 1) self.assertEqual(cache[3], 3) - - def test_decorator(self): - self.assertEqual(cached(1), 1) - self.assertEqual(cached.cache_info(), (0, 1, 2, 1)) - self.assertEqual(cached(1), 1) - self.assertEqual(cached.cache_info(), (1, 1, 2, 1)) - self.assertEqual(cached(1.0), 1.0) - self.assertEqual(cached.cache_info(), (2, 1, 2, 1)) - - 
cached.cache_clear() - self.assertEqual(cached(1), 1) - self.assertEqual(cached.cache_info(), (2, 2, 2, 1)) - - def test_typed_decorator(self): - self.assertEqual(cached_typed(1), 1) - self.assertEqual(cached_typed.cache_info(), (0, 1, 2, 1)) - self.assertEqual(cached_typed(1), 1) - self.assertEqual(cached_typed.cache_info(), (1, 1, 2, 1)) - self.assertEqual(cached_typed(1.0), 1.0) - self.assertEqual(cached_typed.cache_info(), (1, 2, 2, 2)) - self.assertEqual(cached_typed(1.0), 1.0) - self.assertEqual(cached_typed.cache_info(), (2, 2, 2, 2)) diff --git a/tests/test_lrucache.py b/tests/test_lrucache.py index 9f8112e..bfcb9d7 100644 --- a/tests/test_lrucache.py +++ b/tests/test_lrucache.py @@ -1,26 +1,19 @@ import unittest -from . import CacheTestMixin +from . import CacheTestMixin, DecoratorTestMixin from cachetools import LRUCache, lru_cache -@lru_cache(maxsize=2) -def cached(n): - return n +class LRUCacheTest(unittest.TestCase, CacheTestMixin, DecoratorTestMixin): - -@lru_cache(maxsize=2, typed=True, lock=None) -def cached_typed(n): - return n - - -class LRUCacheTest(unittest.TestCase, CacheTestMixin): - - def make_cache(self, maxsize, getsizeof=None): + def cache(self, maxsize, getsizeof=None): return LRUCache(maxsize, getsizeof) - def test_lru_insert(self): - cache = self.make_cache(maxsize=2) + def decorator(self, maxsize, typed=False, lock=None): + return lru_cache(maxsize, typed=typed, lock=lock) + + def test_lru(self): + cache = self.cache(maxsize=2) cache[1] = 1 cache[2] = 2 @@ -45,7 +38,7 @@ class LRUCacheTest(unittest.TestCase, CacheTestMixin): self.assertNotIn(2, cache) def test_lru_getsizeof(self): - cache = self.make_cache(maxsize=3, getsizeof=lambda x: x) + cache = self.cache(maxsize=3, getsizeof=lambda x: x) cache[1] = 1 cache[2] = 2 @@ -65,26 +58,3 @@ class LRUCacheTest(unittest.TestCase, CacheTestMixin): cache[4] = 4 self.assertEqual(len(cache), 1) self.assertEqual(cache[3], 3) - - def test_decorator(self): - self.assertEqual(cached.cache_info(), 
(0, 0, 2, 0)) - self.assertEqual(cached(1), 1) - self.assertEqual(cached.cache_info(), (0, 1, 2, 1)) - self.assertEqual(cached(1), 1) - self.assertEqual(cached.cache_info(), (1, 1, 2, 1)) - self.assertEqual(cached(1.0), 1.0) - self.assertEqual(cached.cache_info(), (2, 1, 2, 1)) - - cached.cache_clear() - self.assertEqual(cached(1), 1) - self.assertEqual(cached.cache_info(), (2, 2, 2, 1)) - - def test_typed_decorator(self): - self.assertEqual(cached_typed(1), 1) - self.assertEqual(cached_typed.cache_info(), (0, 1, 2, 1)) - self.assertEqual(cached_typed(1), 1) - self.assertEqual(cached_typed.cache_info(), (1, 1, 2, 1)) - self.assertEqual(cached_typed(1.0), 1.0) - self.assertEqual(cached_typed.cache_info(), (1, 2, 2, 2)) - self.assertEqual(cached_typed(1.0), 1.0) - self.assertEqual(cached_typed.cache_info(), (2, 2, 2, 2)) diff --git a/tests/test_rrcache.py b/tests/test_rrcache.py index fb5df22..7713e5a 100644 --- a/tests/test_rrcache.py +++ b/tests/test_rrcache.py @@ -1,42 +1,38 @@ import unittest +import random -from . import CacheTestMixin +from . 
import CacheTestMixin, DecoratorTestMixin from cachetools import RRCache, rr_cache -@rr_cache(maxsize=2) -def cached(n): - return n +class RRCacheTest(unittest.TestCase, CacheTestMixin, DecoratorTestMixin): + def cache(self, maxsize, choice=random.choice, getsizeof=None): + return RRCache(maxsize, choice=choice, getsizeof=getsizeof) -@rr_cache(maxsize=2, typed=True, lock=None) -def cached_typed(n): - return n + def decorator(self, maxsize, choice=random.choice, typed=False, lock=None): + return rr_cache(maxsize, choice=choice, typed=typed, lock=lock) + def test_choice(self): + cache = self.cache(maxsize=2, choice=min) -class RRCacheTest(unittest.TestCase, CacheTestMixin): + cache[1] = 1 + cache[2] = 2 + cache[3] = 3 - def make_cache(self, maxsize, getsizeof=None): - return RRCache(maxsize, getsizeof) + self.assertEqual(2, len(cache)) + self.assertEqual(2, cache[2]) + self.assertEqual(3, cache[3]) + self.assertNotIn(1, cache) - def test_decorator(self): - self.assertEqual(cached(1), 1) - self.assertEqual(cached.cache_info(), (0, 1, 2, 1)) - self.assertEqual(cached(1), 1) - self.assertEqual(cached.cache_info(), (1, 1, 2, 1)) - self.assertEqual(cached(1.0), 1.0) - self.assertEqual(cached.cache_info(), (2, 1, 2, 1)) + cache[0] = 0 + self.assertEqual(2, len(cache)) + self.assertEqual(0, cache[0]) + self.assertEqual(3, cache[3]) + self.assertNotIn(2, cache) - cached.cache_clear() - self.assertEqual(cached(1), 1) - self.assertEqual(cached.cache_info(), (2, 2, 2, 1)) - - def test_typed_decorator(self): - self.assertEqual(cached_typed(1), 1) - self.assertEqual(cached_typed.cache_info(), (0, 1, 2, 1)) - self.assertEqual(cached_typed(1), 1) - self.assertEqual(cached_typed.cache_info(), (1, 1, 2, 1)) - self.assertEqual(cached_typed(1.0), 1.0) - self.assertEqual(cached_typed.cache_info(), (1, 2, 2, 2)) - self.assertEqual(cached_typed(1.0), 1.0) - self.assertEqual(cached_typed.cache_info(), (2, 2, 2, 2)) + cache[4] = 4 + self.assertEqual(2, len(cache)) + self.assertEqual(3, 
cache[3]) + self.assertEqual(4, cache[4]) + self.assertNotIn(0, cache) diff --git a/tests/test_ttlcache.py b/tests/test_ttlcache.py index a26d3b6..48b50f1 100644 --- a/tests/test_ttlcache.py +++ b/tests/test_ttlcache.py @@ -1,19 +1,9 @@ import unittest -from . import CacheTestMixin +from . import CacheTestMixin, DecoratorTestMixin from cachetools import ExpiredError, TTLCache, ttl_cache -@ttl_cache(maxsize=2) -def cached(n): - return n - - -@ttl_cache(maxsize=2, typed=True, lock=None) -def cached_typed(n): - return n - - class Timer: def __init__(self): self.__time = 0 @@ -21,17 +11,20 @@ class Timer: def __call__(self): return self.__time - def inc(self): - self.__time = self.__time + 1 + def tick(self): + self.__time += 1 -class TTLCacheTest(unittest.TestCase, CacheTestMixin): +class TTLCacheTest(unittest.TestCase, CacheTestMixin, DecoratorTestMixin): - def make_cache(self, maxsize, getsizeof=None, ttl=0): + def cache(self, maxsize, ttl=0, getsizeof=None): return TTLCache(maxsize, ttl, timer=Timer(), getsizeof=getsizeof) - def test_ttl_insert(self): - cache = self.make_cache(maxsize=2, ttl=2) + def decorator(self, maxsize, ttl=0, typed=False, lock=None): + return ttl_cache(maxsize, ttl, timer=Timer(), typed=typed, lock=lock) + + def test_ttl(self): + cache = self.cache(maxsize=2, ttl=2) self.assertEqual(cache.ttl, 2) cache[1] = 1 @@ -39,14 +32,14 @@ class TTLCacheTest(unittest.TestCase, CacheTestMixin): self.assertEqual(1, len(cache)) self.assertEqual(1, cache[1]) - cache.timer.inc() + cache.timer.tick() cache[2] = 2 self.assertEqual(2, len(cache)) self.assertEqual(1, cache[1]) self.assertEqual(2, cache[2]) - cache.timer.inc() + cache.timer.tick() cache[1] cache[3] = 3 @@ -55,18 +48,43 @@ class TTLCacheTest(unittest.TestCase, CacheTestMixin): self.assertNotIn(2, cache) self.assertEqual(3, cache[3]) - def test_ttl_expire(self): - cache = self.make_cache(maxsize=3, ttl=0) + def test_lru(self): + cache = self.cache(maxsize=2) + + cache[1] = 1 + cache[2] = 2 + 
cache[3] = 3 + + self.assertEqual(len(cache), 2) + self.assertEqual(cache[2], 2) + self.assertEqual(cache[3], 3) + self.assertNotIn(1, cache) + + cache[2] + cache[4] = 4 + self.assertEqual(len(cache), 2) + self.assertEqual(cache[2], 2) + self.assertEqual(cache[4], 4) + self.assertNotIn(3, cache) + + cache[5] = 5 + self.assertEqual(len(cache), 2) + self.assertEqual(cache[4], 4) + self.assertEqual(cache[5], 5) + self.assertNotIn(2, cache) + + def test_expire(self): + cache = self.cache(maxsize=3, ttl=0) self.assertEqual(cache.ttl, 0) cache[1] = 1 self.assertEqual(1, cache[1]) - cache.timer.inc() + cache.timer.tick() with self.assertRaises(ExpiredError): cache[1] cache[2] = 2 self.assertEqual(2, cache[2]) - cache.timer.inc() + cache.timer.tick() with self.assertRaises(ExpiredError): cache[2] cache[3] = 3 @@ -81,90 +99,20 @@ class TTLCacheTest(unittest.TestCase, CacheTestMixin): self.assertNotIn(2, cache) self.assertEqual(3, cache[3]) - cache.timer.inc() + cache.timer.tick() cache.expire() self.assertEqual(0, len(cache)) self.assertNotIn(1, cache) self.assertNotIn(2, cache) self.assertNotIn(3, cache) - def test_ttl_tuple_key(self): - cache = self.make_cache(maxsize=1, ttl=0) + def test_tuple_key(self): + cache = self.cache(maxsize=1, ttl=0) cache[(1, 2, 3)] = 42 self.assertEqual(42, cache[(1, 2, 3)]) - cache.timer.inc() + cache.timer.tick() with self.assertRaises(ExpiredError): cache[(1, 2, 3)] cache.expire() self.assertNotIn((1, 2, 3), cache) - - def test_lru_insert(self): - cache = self.make_cache(maxsize=2) - - cache[1] = 1 - cache[2] = 2 - cache[3] = 3 - - self.assertEqual(len(cache), 2) - self.assertEqual(cache[2], 2) - self.assertEqual(cache[3], 3) - self.assertNotIn(1, cache) - - cache[2] - cache[4] = 4 - self.assertEqual(len(cache), 2) - self.assertEqual(cache[2], 2) - self.assertEqual(cache[4], 4) - self.assertNotIn(3, cache) - - cache[5] = 5 - self.assertEqual(len(cache), 2) - self.assertEqual(cache[4], 4) - self.assertEqual(cache[5], 5) - self.assertNotIn(2, 
cache) - - def test_lru_getsizeof(self): - cache = self.make_cache(maxsize=3, getsizeof=lambda x: x) - - cache[1] = 1 - cache[2] = 2 - - self.assertEqual(len(cache), 2) - self.assertEqual(cache[1], 1) - self.assertEqual(cache[2], 2) - - cache[3] = 3 - - self.assertEqual(len(cache), 1) - self.assertEqual(cache[3], 3) - self.assertNotIn(1, cache) - self.assertNotIn(2, cache) - - with self.assertRaises(ValueError): - cache[4] = 4 - self.assertEqual(len(cache), 1) - self.assertEqual(cache[3], 3) - - def test_decorator(self): - self.assertEqual(cached.cache_info(), (0, 0, 2, 0)) - self.assertEqual(cached(1), 1) - self.assertEqual(cached.cache_info(), (0, 1, 2, 1)) - self.assertEqual(cached(1), 1) - self.assertEqual(cached.cache_info(), (1, 1, 2, 1)) - self.assertEqual(cached(1.0), 1.0) - self.assertEqual(cached.cache_info(), (2, 1, 2, 1)) - - cached.cache_clear() - self.assertEqual(cached(1), 1) - self.assertEqual(cached.cache_info(), (2, 2, 2, 1)) - - def test_typed_decorator(self): - self.assertEqual(cached_typed(1), 1) - self.assertEqual(cached_typed.cache_info(), (0, 1, 2, 1)) - self.assertEqual(cached_typed(1), 1) - self.assertEqual(cached_typed.cache_info(), (1, 1, 2, 1)) - self.assertEqual(cached_typed(1.0), 1.0) - self.assertEqual(cached_typed.cache_info(), (1, 2, 2, 2)) - self.assertEqual(cached_typed(1.0), 1.0) - self.assertEqual(cached_typed.cache_info(), (2, 2, 2, 2)) -- cgit v1.2.3 From 8dd276486cc814c1fd7b66e710a757249695390c Mon Sep 17 00:00:00 2001 From: Thomas Kemmer Date: Tue, 11 Nov 2014 06:43:58 +0100 Subject: Fix #20: Deprecate TTLCache.ExpiredError --- CHANGES.rst | 6 ++---- README.rst | 1 + cachetools/__init__.py | 4 +--- cachetools/errors.py | 7 ------- cachetools/ttlcache.py | 8 ++++---- docs/index.rst | 13 ++++--------- tests/test_ttlcache.py | 8 ++++---- 7 files changed, 16 insertions(+), 31 deletions(-) delete mode 100644 cachetools/errors.py diff --git a/CHANGES.rst b/CHANGES.rst index bb17dcf..bbcc780 100644 --- a/CHANGES.rst +++ 
b/CHANGES.rst @@ -3,13 +3,11 @@ - Add `choice` argument to ``RRCache`` constructor. -- Move ``TTLCache.ExpiredError`` to module level. +- Deprecate ``TTLCache.ExpiredError``. - Refactor ``LRUCache``, ``TTLCache``. -- Remove ``NullContext`` implementation based on ``ExitStack``. - -- Rename `Changes` to `CHANGES.rst`. +- Use custom ``NullContext`` implementation. 0.6.0 2014-10-13 diff --git a/README.rst b/README.rst index 3b69c69..c14e124 100644 --- a/README.rst +++ b/README.rst @@ -18,6 +18,7 @@ including a variant of the Python 3 Standard Library >>> cache['second'] 2 >>> cache['fourth'] = 4 + >>> cache LRUCache([('second', 2), ('fourth', 4)], maxsize=2, currsize=2) diff --git a/cachetools/__init__.py b/cachetools/__init__.py index 4c7e590..1da08fc 100644 --- a/cachetools/__init__.py +++ b/cachetools/__init__.py @@ -6,14 +6,12 @@ from .lfucache import LFUCache, lfu_cache from .lrucache import LRUCache, lru_cache from .ttlcache import TTLCache, ttl_cache from .decorators import cachedmethod -from .errors import ExpiredError __all__ = ( 'Cache', 'RRCache', 'LFUCache', 'LRUCache', 'TTLCache', 'rr_cache', 'lfu_cache', 'lru_cache', 'ttl_cache', - 'cachedmethod', - 'ExpiredError' + 'cachedmethod' ) __version__ = '0.7.0' diff --git a/cachetools/errors.py b/cachetools/errors.py deleted file mode 100644 index 552137b..0000000 --- a/cachetools/errors.py +++ /dev/null @@ -1,7 +0,0 @@ -class ExpiredError(KeyError): - """Raised when a cached item's time-to-live has expired. - - This is a subclass of :exc:`KeyError`. - - """ - pass diff --git a/cachetools/ttlcache.py b/cachetools/ttlcache.py index c0b6835..de68f02 100644 --- a/cachetools/ttlcache.py +++ b/cachetools/ttlcache.py @@ -1,6 +1,5 @@ from .cache import Cache from .decorators import cachedfunc -from .errors import ExpiredError from .lock import RLock import time @@ -33,7 +32,8 @@ class TTLCache(Cache): that expire because they have exceeded their time-to-live will be removed. 
If no expired items are there to remove, the least recently used items will be discarded first to make space when - necessary. + necessary. Trying to access an expired item will raise a + :exc:`KeyError`. By default, the time-to-live is specified in seconds, and the standard :func:`time.time` function is used to retrieve the @@ -42,7 +42,7 @@ class TTLCache(Cache): """ - ExpiredError = ExpiredError # deprecated + ExpiredError = KeyError # deprecated def __init__(self, maxsize, ttl, timer=time.time, getsizeof=None): if getsizeof is None: @@ -68,7 +68,7 @@ class TTLCache(Cache): def __getitem__(self, key, cache_getitem=Cache.__getitem__): link = cache_getitem(self, key) if link.expire < self.__timer(): - raise ExpiredError(key) + raise KeyError(key) next = link.lru_next prev = link.lru_prev prev.lru_next = next diff --git a/docs/index.rst b/docs/index.rst index ded26e2..02455f9 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -20,6 +20,7 @@ including a variant of the Python 3 Standard Library >>> cache['second'] 2 >>> cache['fourth'] = 4 + >>> cache LRUCache([('second', 2), ('fourth', 4)], maxsize=2, currsize=2) For the purpose of this module, a *cache* is a mutable_ mapping_ of a @@ -70,10 +71,10 @@ of one argument used to retrieve the size of an item's value. :exclude-members: ExpiredError Note that a cache item may expire at *any* time, so iterating over - the items of a :class:`TTLCache` may raise :exc:`ExpiredError` + the items of a :class:`TTLCache` may raise :exc:`KeyError` unexpectedly:: - from cachetools import TTLCache, ExpiredError + from cachetools import TTLCache import time cache = TTLCache(maxsize=100, ttl=1) @@ -83,7 +84,7 @@ of one argument used to retrieve the size of an item's value. 
for key in cache: try: print(cache[key]) - except ExpiredError: + except KeyError: print('Key %r has expired' % key) @@ -185,12 +186,6 @@ Method Decorators print("PEP #1: %s" % peps.get(1)) -Exception Classes ------------------------------------------------------------------------- - -.. autoexception:: ExpiredError - - .. _mutable: http://docs.python.org/dev/glossary.html#term-mutable .. _mapping: http://docs.python.org/dev/glossary.html#term-mapping .. _cache algorithm: http://en.wikipedia.org/wiki/Cache_algorithms diff --git a/tests/test_ttlcache.py b/tests/test_ttlcache.py index 48b50f1..cdf4a90 100644 --- a/tests/test_ttlcache.py +++ b/tests/test_ttlcache.py @@ -1,7 +1,7 @@ import unittest from . import CacheTestMixin, DecoratorTestMixin -from cachetools import ExpiredError, TTLCache, ttl_cache +from cachetools import TTLCache, ttl_cache class Timer: @@ -80,12 +80,12 @@ class TTLCacheTest(unittest.TestCase, CacheTestMixin, DecoratorTestMixin): cache[1] = 1 self.assertEqual(1, cache[1]) cache.timer.tick() - with self.assertRaises(ExpiredError): + with self.assertRaises(KeyError): cache[1] cache[2] = 2 self.assertEqual(2, cache[2]) cache.timer.tick() - with self.assertRaises(ExpiredError): + with self.assertRaises(KeyError): cache[2] cache[3] = 3 self.assertEqual(3, cache[3]) @@ -112,7 +112,7 @@ class TTLCacheTest(unittest.TestCase, CacheTestMixin, DecoratorTestMixin): cache[(1, 2, 3)] = 42 self.assertEqual(42, cache[(1, 2, 3)]) cache.timer.tick() - with self.assertRaises(ExpiredError): + with self.assertRaises(KeyError): cache[(1, 2, 3)] cache.expire() self.assertNotIn((1, 2, 3), cache) -- cgit v1.2.3 From cef66537c7393bbfc0fbd774571d56b2635f0455 Mon Sep 17 00:00:00 2001 From: Thomas Kemmer Date: Tue, 11 Nov 2014 12:27:05 +0100 Subject: Fix #23: Refactor LFUCache. 
--- CHANGES.rst | 9 +++++---- cachetools/lfucache.py | 26 ++++++++++++++------------ 2 files changed, 19 insertions(+), 16 deletions(-) diff --git a/CHANGES.rst b/CHANGES.rst index bbcc780..b4a2b4a 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -1,13 +1,14 @@ 0.7.0 UNRELEASED ---------------- -- Add `choice` argument to ``RRCache`` constructor. - - Deprecate ``TTLCache.ExpiredError``. -- Refactor ``LRUCache``, ``TTLCache``. +- Add `choice` argument to ``RRCache`` constructor. + +- Refactor ``LFUCache``, ``LRUCache`` and ``TTLCache``. -- Use custom ``NullContext`` implementation. +- Always use custom ``NullContext`` implementation for unsynchronized + function decorators. 0.6.0 2014-10-13 diff --git a/cachetools/lfucache.py b/cachetools/lfucache.py index e0ea562..58b7712 100644 --- a/cachetools/lfucache.py +++ b/cachetools/lfucache.py @@ -2,6 +2,7 @@ from .cache import Cache from .decorators import cachedfunc from .lock import RLock +import collections import operator @@ -14,28 +15,29 @@ class LFUCache(Cache): """ def __init__(self, maxsize, getsizeof=None): - if getsizeof is None: - Cache.__init__(self, maxsize) - else: - Cache.__init__(self, maxsize, lambda e: getsizeof(e[0])) - self.getsizeof = getsizeof + Cache.__init__(self, maxsize, getsizeof=getsizeof) + self.__counter = collections.Counter() def __getitem__(self, key, cache_getitem=Cache.__getitem__): - entry = cache_getitem(self, key) - entry[1] += 1 - return entry[0] + value = cache_getitem(self, key) + self.__counter[key] += 1 + return value def __setitem__(self, key, value, cache_setitem=Cache.__setitem__): - cache_setitem(self, key, [value, 0]) + cache_setitem(self, key, value) + self.__counter[key] += 1 + + def __delitem__(self, key, cache_delitem=Cache.__delitem__): + cache_delitem(self, key) + del self.__counter[key] def popitem(self): """Remove and return the `(key, value)` pair least frequently used.""" - items = ((key, Cache.__getitem__(self, key)[1]) for key in self) try: - key, _ = min(items, 
key=operator.itemgetter(1)) + key, _ = min(self.__counter.items(), key=operator.itemgetter(1)) except ValueError: raise KeyError('cache is empty') - return (key, self.pop(key)) + return key, self.pop(key) def lfu_cache(maxsize=128, typed=False, getsizeof=None, lock=RLock): -- cgit v1.2.3 From b0242092f82c0cd5dc7193b10366780f7f7219c8 Mon Sep 17 00:00:00 2001 From: Thomas Kemmer Date: Tue, 11 Nov 2014 16:14:50 +0100 Subject: Fix #24: Skip expired items when iterating over TTLCache. --- cachetools/cache.py | 12 +++-- cachetools/ttlcache.py | 33 ++++++++++++- tests/test_ttlcache.py | 127 ++++++++++++++++++++++++++++++++++--------------- 3 files changed, 128 insertions(+), 44 deletions(-) diff --git a/cachetools/cache.py b/cachetools/cache.py index abbafc5..a0802b8 100644 --- a/cachetools/cache.py +++ b/cachetools/cache.py @@ -1,6 +1,10 @@ import collections +def one(value): + return 1 + + class Cache(collections.MutableMapping): """Mutable mapping to serve as a simple cache or cache base class. @@ -20,7 +24,7 @@ class Cache(collections.MutableMapping): def __init__(self, maxsize, getsizeof=None): self.__mapping = dict() self.__maxsize = maxsize - self.__getsizeof = getsizeof or self.__one + self.__getsizeof = getsizeof or one self.__currsize = 0 def __repr__(self): @@ -72,6 +76,6 @@ class Cache(collections.MutableMapping): """Return the size of a cache element.""" return self.__getsizeof(value) - @staticmethod - def __one(value): - return 1 + def _getitemsize(self, key): + # TODO: decide on interface, make public + return self.__mapping[key][1] diff --git a/cachetools/ttlcache.py b/cachetools/ttlcache.py index de68f02..f11a577 100644 --- a/cachetools/ttlcache.py +++ b/cachetools/ttlcache.py @@ -26,7 +26,7 @@ class Link(object): class TTLCache(Cache): - """Cache implementation with per-item time-to-live (TTL) value. + """LRU Cache implementation with per-item time-to-live (TTL) value. This class associates a time-to-live value with each item. 
Items that expire because they have exceeded their time-to-live will be @@ -110,6 +110,25 @@ class TTLCache(Cache): link.unlink() self.expire() + def __iter__(self): + timer = self.__timer + root = self.__root + curr = root.ttl_next + while curr is not root: + if not (curr.expire < timer()): + yield curr.key + curr = curr.ttl_next + + def __len__(self, cache_len=Cache.__len__): + expired = 0 + time = self.__timer() + root = self.__root + head = root.ttl_next + while head is not root and head.expire < time: + expired += 1 + head = head.ttl_next + return cache_len(self) - expired + def expire(self, time=None): """Remove expired items from the cache. @@ -139,6 +158,18 @@ class TTLCache(Cache): link.unlink() return (key, link.value) + @property + def currsize(self): + getsize = Cache._getitemsize # TODO: decide on final interface + expired = 0 + time = self.__timer() + root = self.__root + head = root.ttl_next + while head is not root and head.expire < time: + expired += getsize(self, head.key) + head = head.ttl_next + return super(TTLCache, self).currsize - expired + @property def timer(self): """Return the timer used by the cache.""" diff --git a/tests/test_ttlcache.py b/tests/test_ttlcache.py index cdf4a90..ef36446 100644 --- a/tests/test_ttlcache.py +++ b/tests/test_ttlcache.py @@ -23,31 +23,6 @@ class TTLCacheTest(unittest.TestCase, CacheTestMixin, DecoratorTestMixin): def decorator(self, maxsize, ttl=0, typed=False, lock=None): return ttl_cache(maxsize, ttl, timer=Timer(), typed=typed, lock=lock) - def test_ttl(self): - cache = self.cache(maxsize=2, ttl=2) - self.assertEqual(cache.ttl, 2) - - cache[1] = 1 - - self.assertEqual(1, len(cache)) - self.assertEqual(1, cache[1]) - - cache.timer.tick() - cache[2] = 2 - - self.assertEqual(2, len(cache)) - self.assertEqual(1, cache[1]) - self.assertEqual(2, cache[2]) - - cache.timer.tick() - cache[1] - cache[3] = 3 - - self.assertEqual(2, len(cache)) - self.assertEqual(1, cache[1]) - self.assertNotIn(2, cache) - 
self.assertEqual(3, cache[3]) - def test_lru(self): cache = self.cache(maxsize=2) @@ -56,63 +31,137 @@ class TTLCacheTest(unittest.TestCase, CacheTestMixin, DecoratorTestMixin): cache[3] = 3 self.assertEqual(len(cache), 2) + self.assertNotIn(1, cache) self.assertEqual(cache[2], 2) self.assertEqual(cache[3], 3) - self.assertNotIn(1, cache) cache[2] cache[4] = 4 self.assertEqual(len(cache), 2) + self.assertNotIn(1, cache) self.assertEqual(cache[2], 2) - self.assertEqual(cache[4], 4) self.assertNotIn(3, cache) + self.assertEqual(cache[4], 4) cache[5] = 5 self.assertEqual(len(cache), 2) + self.assertNotIn(1, cache) + self.assertNotIn(2, cache) + self.assertNotIn(3, cache) self.assertEqual(cache[4], 4) self.assertEqual(cache[5], 5) - self.assertNotIn(2, cache) - def test_expire(self): - cache = self.cache(maxsize=3, ttl=0) - self.assertEqual(cache.ttl, 0) + def test_ttl(self): + cache = self.cache(maxsize=2, ttl=1) + self.assertEqual(1, cache.ttl) cache[1] = 1 + self.assertEqual({1}, set(cache)) + self.assertEqual(1, len(cache)) + self.assertEqual(1, cache.currsize) self.assertEqual(1, cache[1]) + cache.timer.tick() - with self.assertRaises(KeyError): - cache[1] + self.assertEqual({1}, set(cache)) + self.assertEqual(1, len(cache)) + self.assertEqual(1, cache.currsize) + self.assertEqual(1, cache[1]) + cache[2] = 2 + self.assertEqual({1, 2}, set(cache)) + self.assertEqual(2, len(cache)) + self.assertEqual(2, cache.currsize) + self.assertEqual(1, cache[1]) self.assertEqual(2, cache[2]) + cache.timer.tick() - with self.assertRaises(KeyError): - cache[2] + self.assertEqual({2}, set(cache)) + self.assertEqual(1, len(cache)) + self.assertEqual(1, cache.currsize) + self.assertNotIn(1, cache) + self.assertEqual(2, cache[2]) + cache[3] = 3 + self.assertEqual({2, 3}, set(cache)) + self.assertEqual(2, len(cache)) + self.assertEqual(2, cache.currsize) + self.assertNotIn(1, cache) + self.assertEqual(2, cache[2]) self.assertEqual(3, cache[3]) - cache.expire(1) + cache.timer.tick() + 
self.assertEqual({3}, set(cache)) + self.assertEqual(1, len(cache)) + self.assertEqual(1, cache.currsize) self.assertNotIn(1, cache) + self.assertNotIn(2, cache) self.assertEqual(3, cache[3]) - cache.expire(2) + cache.timer.tick() + self.assertEqual(set(), set(cache)) + self.assertEqual(0, len(cache)) + self.assertEqual(0, cache.currsize) self.assertNotIn(1, cache) self.assertNotIn(2, cache) - self.assertEqual(3, cache[3]) + self.assertNotIn(3, cache) + + def test_expire(self): + cache = self.cache(maxsize=3, ttl=2) + self.assertEqual(2, cache.ttl) + cache[1] = 1 cache.timer.tick() + cache[2] = 2 + cache.timer.tick() + cache[3] = 3 + self.assertEqual(2, cache.timer()) + + self.assertEqual({1, 2, 3}, set(cache)) + self.assertEqual(3, len(cache)) + self.assertEqual(3, cache.currsize) + self.assertEqual(1, cache[1]) + self.assertEqual(2, cache[2]) + self.assertEqual(3, cache[3]) + cache.expire() + self.assertEqual({1, 2, 3}, set(cache)) + self.assertEqual(3, len(cache)) + self.assertEqual(3, cache.currsize) + self.assertEqual(1, cache[1]) + self.assertEqual(2, cache[2]) + self.assertEqual(3, cache[3]) + + cache.expire(3) + self.assertEqual({2, 3}, set(cache)) + self.assertEqual(2, len(cache)) + self.assertEqual(2, cache.currsize) + self.assertNotIn(1, cache) + self.assertEqual(2, cache[2]) + self.assertEqual(3, cache[3]) + + cache.expire(4) + self.assertEqual({3}, set(cache)) + self.assertEqual(1, len(cache)) + self.assertEqual(1, cache.currsize) + self.assertNotIn(1, cache) + self.assertNotIn(2, cache) + self.assertEqual(3, cache[3]) + + cache.expire(5) + self.assertEqual(set(), set(cache)) self.assertEqual(0, len(cache)) + self.assertEqual(0, cache.currsize) self.assertNotIn(1, cache) self.assertNotIn(2, cache) self.assertNotIn(3, cache) def test_tuple_key(self): cache = self.cache(maxsize=1, ttl=0) + self.assertEqual(0, cache.ttl) cache[(1, 2, 3)] = 42 self.assertEqual(42, cache[(1, 2, 3)]) cache.timer.tick() with self.assertRaises(KeyError): cache[(1, 2, 3)] - 
cache.expire() self.assertNotIn((1, 2, 3), cache) -- cgit v1.2.3 From 5385445fa6dfb94e3ab457890bcb9ec94134e997 Mon Sep 17 00:00:00 2001 From: Thomas Kemmer Date: Wed, 12 Nov 2014 07:58:59 +0100 Subject: Prepare v0.7.0 --- CHANGES.rst | 4 ++-- README.rst | 6 +++--- docs/index.rst | 65 ++++++++++++++++++++++++++++++++++++---------------------- 3 files changed, 46 insertions(+), 29 deletions(-) diff --git a/CHANGES.rst b/CHANGES.rst index b4a2b4a..1084110 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -1,4 +1,4 @@ -0.7.0 UNRELEASED +0.7.0 2014-11-12 ---------------- - Deprecate ``TTLCache.ExpiredError``. @@ -7,7 +7,7 @@ - Refactor ``LFUCache``, ``LRUCache`` and ``TTLCache``. -- Always use custom ``NullContext`` implementation for unsynchronized +- Use custom ``NullContext`` implementation for unsynchronized function decorators. diff --git a/README.rst b/README.rst index c14e124..770c20d 100644 --- a/README.rst +++ b/README.rst @@ -2,8 +2,8 @@ cachetools ======================================================================== This module provides various memoizing collections and decorators, -including a variant of the Python 3 Standard Library -``functools.lru_cache`` function decorator. +including a variant of the Python 3 Standard Library `@lru_cache`_ +function decorator. .. code-block:: pycon @@ -76,7 +76,7 @@ Copyright (c) 2014 Thomas Kemmer. Licensed under the `MIT License`_. -.. _functools.lru_cache: http://docs.python.org/3.4/library/functools.html#functools.lru_cache +.. _@lru_cache: http://docs.python.org/3/library/functools.html#functools.lru_cache .. _mutable: http://docs.python.org/dev/glossary.html#term-mutable .. _mapping: http://docs.python.org/dev/glossary.html#term-mapping .. _cache algorithm: http://en.wikipedia.org/wiki/Cache_algorithms diff --git a/docs/index.rst b/docs/index.rst index 02455f9..2428956 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -4,8 +4,8 @@ .. 
module:: cachetools This module provides various memoizing collections and decorators, -including a variant of the Python 3 Standard Library -:func:`functools.lru_cache` function decorator. +including a variant of the Python 3 Standard Library `@lru_cache`_ +function decorator. .. code-block:: pycon @@ -57,10 +57,10 @@ of one argument used to retrieve the size of an item's value. .. autoclass:: Cache :members: -.. autoclass:: LRUCache +.. autoclass:: LFUCache :members: -.. autoclass:: LFUCache +.. autoclass:: LRUCache :members: .. autoclass:: RRCache @@ -77,11 +77,11 @@ of one argument used to retrieve the size of an item's value. from cachetools import TTLCache import time - cache = TTLCache(maxsize=100, ttl=1) + cache = TTLCache(maxsize=100, ttl=1.0) cache.update({1: 1, 2: 2, 3: 3}) - time.sleep(1) for key in cache: + time.sleep(0.5) try: print(cache[key]) except KeyError: @@ -93,11 +93,27 @@ Function Decorators This module provides several memoizing function decorators compatible with -- though not necessarily as efficient as -- the Python 3 -Standard Library :func:`functools.lru_cache` decorator. 
+Standard Library :func:`functools.lru_cache` decorator:: + + import cachetools + import urllib.request -In addition to a `maxsize` parameter, all decorators feature optional -arguments, which should be specified as keyword arguments for -compatibility with future extensions: + @cachetools.lru_cache(maxsize=32) + def get_pep(num): + """Retrieve text of a Python Enhancement Proposal""" + url = 'http://www.python.org/dev/peps/pep-%04d/' % num + with urllib.request.urlopen(url) as s: + return s.read() + + for n in 8, 290, 308, 320, 8, 218, 320, 279, 289, 320, 9991: + try: + print(n, len(get_pep(n))) + except urllib.error.HTTPError: + print(n, 'Not Found') + print(get_pep.cache_info()) + +In addition to a `maxsize` parameter, all decorators feature some +optional keyword arguments: - `typed`, if is set to :const:`True`, will cause function arguments of different types to be cached separately. @@ -120,12 +136,6 @@ documentation for details. Unlike :func:`functools.lru_cache`, setting `maxsize` to zero or :const:`None` is not supported. -.. decorator:: rr_cache(maxsize=128, choice=random.choice, typed=False, getsizeof=None, lock=threading.RLock) - - Decorator that wraps a function with a memoizing callable that - saves up to `maxsize` results based on a Random Replacement (RR) - algorithm. - .. decorator:: lfu_cache(maxsize=128, typed=False, getsizeof=None, lock=threading.RLock) Decorator that wraps a function with a memoizing callable that @@ -138,6 +148,12 @@ Unlike :func:`functools.lru_cache`, setting `maxsize` to zero or saves up to `maxsize` results based on a Least Recently Used (LRU) algorithm. +.. decorator:: rr_cache(maxsize=128, choice=random.choice, typed=False, getsizeof=None, lock=threading.RLock) + + Decorator that wraps a function with a memoizing callable that + saves up to `maxsize` results based on a Random Replacement (RR) + algorithm. + .. 
decorator:: ttl_cache(maxsize=128, ttl=600, timer=time.time, typed=False, getsizeof=None, lock=threading.RLock) Decorator to wrap a function with a memoizing callable that saves @@ -172,20 +188,21 @@ Method Decorators class CachedPEPs(object): - def __init__(self, cachesize): - self.cache = LRUCache(maxsize=cachesize) + def __init__(self, cachesize): + self.cache = LRUCache(maxsize=cachesize) - @cachedmethod(operator.attrgetter('cache')) - def get(self, num): - """Retrieve text of a Python Enhancement Proposal""" - url = 'http://www.python.org/dev/peps/pep-%04d/' % num - with urllib.request.urlopen(url) as s: - return s.read() + @cachedmethod(operator.attrgetter('cache')) + def get(self, num): + """Retrieve text of a Python Enhancement Proposal""" + url = 'http://www.python.org/dev/peps/pep-%04d/' % num + with urllib.request.urlopen(url) as s: + return s.read() peps = CachedPEPs(cachesize=10) print("PEP #1: %s" % peps.get(1)) +.. _@lru_cache: http://docs.python.org/3/library/functools.html#functools.lru_cache .. _mutable: http://docs.python.org/dev/glossary.html#term-mutable .. _mapping: http://docs.python.org/dev/glossary.html#term-mapping .. _cache algorithm: http://en.wikipedia.org/wiki/Cache_algorithms -- cgit v1.2.3 From 827e2f53efcebe2ad11d52d5dbb6e049d1ae7edb Mon Sep 17 00:00:00 2001 From: Thomas Kemmer Date: Sat, 22 Nov 2014 21:20:49 +0100 Subject: Fix #25: Fix MANIFEST.in --- CHANGES.rst | 6 ++++++ MANIFEST.in | 4 +++- cachetools/__init__.py | 2 +- 3 files changed, 10 insertions(+), 2 deletions(-) diff --git a/CHANGES.rst b/CHANGES.rst index 1084110..62f7279 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -1,3 +1,9 @@ +0.7.1 2014-11-22 +---------------- + +- Fix `MANIFEST.in`. 
+ + 0.7.0 2014-11-12 ---------------- diff --git a/MANIFEST.in b/MANIFEST.in index c9346d9..8ea55f2 100644 --- a/MANIFEST.in +++ b/MANIFEST.in @@ -1,4 +1,6 @@ -include Changes +include .coveragerc +include .travis.yml +include CHANGES.rst include LICENSE include MANIFEST.in include README.rst diff --git a/cachetools/__init__.py b/cachetools/__init__.py index 1da08fc..139ed1d 100644 --- a/cachetools/__init__.py +++ b/cachetools/__init__.py @@ -14,4 +14,4 @@ __all__ = ( 'cachedmethod' ) -__version__ = '0.7.0' +__version__ = '0.7.1' -- cgit v1.2.3 From 8c1f8c74320b3927dc194e8fb9f89b8ab2b5e240 Mon Sep 17 00:00:00 2001 From: Thomas Kemmer Date: Mon, 24 Nov 2014 05:38:37 +0100 Subject: Add wheel support. --- MANIFEST.in | 2 -- setup.cfg | 3 +++ 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/MANIFEST.in b/MANIFEST.in index 8ea55f2..8b3684f 100644 --- a/MANIFEST.in +++ b/MANIFEST.in @@ -1,5 +1,3 @@ -include .coveragerc -include .travis.yml include CHANGES.rst include LICENSE include MANIFEST.in diff --git a/setup.cfg b/setup.cfg index 9ccea7a..01a6a79 100644 --- a/setup.cfg +++ b/setup.cfg @@ -8,3 +8,6 @@ all_files = 1 [upload_sphinx] upload-dir = docs/_build/html + +[wheel] +universal = 1 -- cgit v1.2.3 From f240afdd5bbb7b34e901f1b0a4bacf59c8de2919 Mon Sep 17 00:00:00 2001 From: Thomas Kemmer Date: Mon, 24 Nov 2014 05:48:07 +0100 Subject: Package cleanup. 
--- .coveragerc | 5 ----- .travis.yml | 15 +++++++-------- setup.cfg | 12 ++++++++---- setup.py | 15 ++++++++------- 4 files changed, 23 insertions(+), 24 deletions(-) delete mode 100644 .coveragerc diff --git a/.coveragerc b/.coveragerc deleted file mode 100644 index e77617c..0000000 --- a/.coveragerc +++ /dev/null @@ -1,5 +0,0 @@ -[report] -omit = - */pyshared/* - */python?.?/* - */site-packages/nose/* diff --git a/.travis.yml b/.travis.yml index fd9887b..81a8f8a 100644 --- a/.travis.yml +++ b/.travis.yml @@ -1,13 +1,12 @@ language: python python: - - 2.7 - - 3.2 - - 3.3 - - 3.4 +- 2.7 +- 3.2 +- 3.3 +- 3.4 install: - - pip install . - - pip install coverage coveralls +- pip install . coverage coveralls script: - - nosetests --with-coverage --cover-package=cachetools +- python setup.py nosetests after_success: - - coveralls +- coveralls diff --git a/setup.cfg b/setup.cfg index 01a6a79..581f1ba 100644 --- a/setup.cfg +++ b/setup.cfg @@ -1,5 +1,12 @@ +[bdist_wheel] +universal = 1 + [flake8] -exclude = .git,docs +exclude = .git,docs,setup.py + +[nosetests] +with-coverage = 1 +cover-package = cachetools [build_sphinx] source-dir = docs/ @@ -8,6 +15,3 @@ all_files = 1 [upload_sphinx] upload-dir = docs/_build/html - -[wheel] -universal = 1 diff --git a/setup.py b/setup.py index 07fef3f..22ce031 100644 --- a/setup.py +++ b/setup.py @@ -1,20 +1,21 @@ +import os.path, codecs, re + from setuptools import setup -def get_version(filename): - import re - content = open(filename).read() - metadata = dict(re.findall("__([a-z]+)__ = '([^']+)'", content)) - return metadata['version'] +with codecs.open(os.path.join(os.path.dirname(__file__), 'cachetools', '__init__.py'), + encoding='utf8') as f: + metadata = dict(re.findall(r"__([a-z]+)__ = '([^']+)", f.read())) + setup( name='cachetools', - version=get_version('cachetools/__init__.py'), + version=metadata['version'], author='Thomas Kemmer', author_email='tkemmer@computer.org', url='https://github.com/tkem/cachetools', 
license='MIT', - description='Extensible memoizing collections and decorators', # noqa + description='Extensible memoizing collections and decorators', long_description=open('README.rst').read(), keywords='cache caching LRU LFU TTL', classifiers=[ -- cgit v1.2.3 From 87d60895ddd631a8e135bfc37533bf3d83c9a418 Mon Sep 17 00:00:00 2001 From: Thomas Kemmer Date: Wed, 3 Dec 2014 12:04:40 +0100 Subject: Bump version number for TestPyPI. --- cachetools/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cachetools/__init__.py b/cachetools/__init__.py index 139ed1d..b3d8d21 100644 --- a/cachetools/__init__.py +++ b/cachetools/__init__.py @@ -14,4 +14,4 @@ __all__ = ( 'cachedmethod' ) -__version__ = '0.7.1' +__version__ = '0.8.0alpha' -- cgit v1.2.3 From ebd7920290b5a038226a158e31a40f191331a7ee Mon Sep 17 00:00:00 2001 From: Thomas Kemmer Date: Mon, 1 Dec 2014 12:36:01 +0100 Subject: Add Cache.__missing__() --- cachetools/cache.py | 71 ++++++++++++++++++++++++++++++++++++++++---------- cachetools/lfucache.py | 6 ++--- cachetools/lrucache.py | 14 ++++++---- cachetools/rrcache.py | 4 +-- cachetools/ttlcache.py | 33 ++++++++++++++++------- docs/index.rst | 42 ++++++++++++++++++++++++----- tests/__init__.py | 60 +++++++++++++++++++++++++++++++++++++++++- tests/test_cache.py | 4 +-- tests/test_lfucache.py | 4 +-- tests/test_lrucache.py | 4 +-- tests/test_rrcache.py | 4 +-- tests/test_ttlcache.py | 4 +-- 12 files changed, 199 insertions(+), 51 deletions(-) diff --git a/cachetools/cache.py b/cachetools/cache.py index a0802b8..e600f5d 100644 --- a/cachetools/cache.py +++ b/cachetools/cache.py @@ -21,11 +21,12 @@ class Cache(collections.MutableMapping): """ - def __init__(self, maxsize, getsizeof=None): - self.__mapping = dict() + def __init__(self, maxsize, missing=None, getsizeof=None): + self.__data = dict() + self.__currsize = 0 self.__maxsize = maxsize + self.__missing = missing self.__getsizeof = getsizeof or one - self.__currsize = 0 def 
__repr__(self): return '%s(%r, maxsize=%d, currsize=%d)' % ( @@ -36,31 +37,48 @@ class Cache(collections.MutableMapping): ) def __getitem__(self, key): - return self.__mapping[key][0] + try: + return self.__data[key][0] + except KeyError: + return self.__missing__(key) def __setitem__(self, key, value): - mapping = self.__mapping + data = self.__data maxsize = self.__maxsize size = self.__getsizeof(value) if size > maxsize: raise ValueError('value too large') - if key not in mapping or mapping[key][1] < size: + if key not in data or data[key][1] < size: while self.__currsize + size > maxsize: self.popitem() - if key in mapping: - self.__currsize -= mapping[key][1] - mapping[key] = (value, size) - self.__currsize += size + if key in data: + diffsize = size - data[key][1] + else: + diffsize = size + data[key] = (value, size) + self.__currsize += diffsize def __delitem__(self, key): - _, size = self.__mapping.pop(key) + _, size = self.__data.pop(key) self.__currsize -= size + def __contains__(self, key): + return key in self.__data + + def __missing__(self, key): + missing = self.__missing + if missing: + # return value as stored in data! 
+ self.__setitem__(key, missing(key)) + return self.__data[key][0] + else: + raise KeyError(key) + def __iter__(self): - return iter(self.__mapping) + return iter(self.__data) def __len__(self): - return len(self.__mapping) + return len(self.__data) @property def maxsize(self): @@ -78,4 +96,29 @@ class Cache(collections.MutableMapping): def _getitemsize(self, key): # TODO: decide on interface, make public - return self.__mapping[key][1] + return self.__data[key][1] + + # collections.MutableMapping mixin methods do not handle __missing__ + + def get(self, key, default=None): + if key in self: + return self[key] + else: + return default + + __marker = object() + + def pop(self, key, default=__marker): + if key in self: + value = self[key] + del self[key] + return value + elif default is self.__marker: + raise KeyError(key) + else: + return default + + def setdefault(self, key, default=None): + if key not in self: + self[key] = default + return self[key] diff --git a/cachetools/lfucache.py b/cachetools/lfucache.py index 58b7712..b24094a 100644 --- a/cachetools/lfucache.py +++ b/cachetools/lfucache.py @@ -14,8 +14,8 @@ class LFUCache(Cache): """ - def __init__(self, maxsize, getsizeof=None): - Cache.__init__(self, maxsize, getsizeof=getsizeof) + def __init__(self, maxsize, missing=None, getsizeof=None): + Cache.__init__(self, maxsize, missing=missing, getsizeof=getsizeof) self.__counter = collections.Counter() def __getitem__(self, key, cache_getitem=Cache.__getitem__): @@ -34,7 +34,7 @@ class LFUCache(Cache): def popitem(self): """Remove and return the `(key, value)` pair least frequently used.""" try: - key, _ = min(self.__counter.items(), key=operator.itemgetter(1)) + key = min(self.__counter.items(), key=operator.itemgetter(1))[0] except ValueError: raise KeyError('cache is empty') return key, self.pop(key) diff --git a/cachetools/lrucache.py b/cachetools/lrucache.py index 2d68077..f697ce3 100644 --- a/cachetools/lrucache.py +++ b/cachetools/lrucache.py @@ -22,12 
+22,12 @@ class LRUCache(Cache): """ - def __init__(self, maxsize, getsizeof=None): + def __init__(self, maxsize, missing=None, getsizeof=None): if getsizeof is not None: - Cache.__init__(self, maxsize, lambda link: getsizeof(link.value)) + Cache.__init__(self, maxsize, missing=missing, getsizeof=lambda link: getsizeof(link.value)) self.getsizeof = getsizeof else: - Cache.__init__(self, maxsize) + Cache.__init__(self, maxsize, missing=missing) self.__root = root = Link() root.prev = root.next = root @@ -52,11 +52,12 @@ class LRUCache(Cache): return link.value def __setitem__(self, key, value, + cache_contains=Cache.__contains__, cache_getitem=Cache.__getitem__, cache_setitem=Cache.__setitem__): - try: + if cache_contains(self, key): oldlink = cache_getitem(self, key) - except KeyError: + else: oldlink = None link = Link() link.key = key @@ -69,8 +70,11 @@ class LRUCache(Cache): tail.next = root.prev = link def __delitem__(self, key, + cache_contains=Cache.__contains__, cache_getitem=Cache.__getitem__, cache_delitem=Cache.__delitem__): + if not cache_contains(self, key): + raise KeyError(key) link = cache_getitem(self, key) cache_delitem(self, key) link.unlink() diff --git a/cachetools/rrcache.py b/cachetools/rrcache.py index 5116775..f1badb9 100644 --- a/cachetools/rrcache.py +++ b/cachetools/rrcache.py @@ -18,8 +18,8 @@ class RRCache(Cache): """ - def __init__(self, maxsize, choice=random.choice, getsizeof=None): - Cache.__init__(self, maxsize, getsizeof=getsizeof) + def __init__(self, maxsize, choice=random.choice, missing=None, getsizeof=None): + Cache.__init__(self, maxsize, missing=missing, getsizeof=getsizeof) self.__choice = choice def popitem(self): diff --git a/cachetools/ttlcache.py b/cachetools/ttlcache.py index f11a577..e348965 100644 --- a/cachetools/ttlcache.py +++ b/cachetools/ttlcache.py @@ -44,11 +44,11 @@ class TTLCache(Cache): ExpiredError = KeyError # deprecated - def __init__(self, maxsize, ttl, timer=time.time, getsizeof=None): + def 
__init__(self, maxsize, ttl, timer=time.time, missing=None, getsizeof=None): if getsizeof is None: - Cache.__init__(self, maxsize) + Cache.__init__(self, maxsize, missing=missing) else: - Cache.__init__(self, maxsize, lambda e: getsizeof(e.value)) + Cache.__init__(self, maxsize, missing=missing, getsizeof=lambda e: getsizeof(e.value)) self.getsizeof = getsizeof self.__root = root = Link() root.ttl_prev = root.ttl_next = root @@ -65,10 +65,12 @@ class TTLCache(Cache): self.currsize, ) - def __getitem__(self, key, cache_getitem=Cache.__getitem__): + def __getitem__(self, key, + cache_getitem=Cache.__getitem__, + cache_missing=Cache.__missing__): link = cache_getitem(self, key) if link.expire < self.__timer(): - raise KeyError(key) + return cache_missing(self, key).value next = link.lru_next prev = link.lru_prev prev.lru_next = next @@ -79,13 +81,14 @@ class TTLCache(Cache): return link.value def __setitem__(self, key, value, + cache_contains=Cache.__contains__, cache_getitem=Cache.__getitem__, cache_setitem=Cache.__setitem__): time = self.__timer() self.expire(time) - try: + if cache_contains(self, key): oldlink = cache_getitem(self, key) - except KeyError: + else: oldlink = None link = Link() link.key = key @@ -94,8 +97,7 @@ class TTLCache(Cache): cache_setitem(self, key, link) if oldlink: oldlink.unlink() - root = self.__root - link.ttl_next = root + link.ttl_next = root = self.__root link.ttl_prev = tail = root.ttl_prev tail.ttl_next = root.ttl_prev = link link.lru_next = root @@ -103,13 +105,26 @@ class TTLCache(Cache): tail.lru_next = root.lru_prev = link def __delitem__(self, key, + cache_contains=Cache.__contains__, cache_getitem=Cache.__getitem__, cache_delitem=Cache.__delitem__): + if not cache_contains(self, key): + raise KeyError(key) link = cache_getitem(self, key) cache_delitem(self, key) link.unlink() self.expire() + def __contains__(self, key, + cache_contains=Cache.__contains__, + cache_getitem=Cache.__getitem__): + if not cache_contains(self, key): + 
return False + elif cache_getitem(self, key).expire < self.__timer(): + return False + else: + return True + def __iter__(self): timer = self.__timer root = self.__root diff --git a/docs/index.rst b/docs/index.rst index 2428956..9041ce9 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -24,11 +24,11 @@ function decorator. LRUCache([('second', 2), ('fourth', 4)], maxsize=2, currsize=2) For the purpose of this module, a *cache* is a mutable_ mapping_ of a -fixed maximum size. When the cache is full, i.e. the size of the -cache would exceed its maximum size, the cache must choose which -item(s) to discard based on a suitable `cache algorithm`_. A cache's -size is the sum of the size of its items, and an item's size in -general is a property or function of its value, e.g. the result of +fixed maximum size. When the cache is full, i.e. by adding another +item the cache would exceed its maximum size, the cache must choose +which item(s) to discard based on a suitable `cache algorithm`_. A +cache's size is the sum of the size of its items, and an item's size +in general is a property or function of its value, e.g. the result of :func:`sys.getsizeof`, or :func:`len` for string and sequence values. This module provides multiple cache implementations based on different @@ -46,11 +46,39 @@ different cache algorithms. All these classes derive from class :attr:`maxsize` and :attr:`currsize` to retrieve the maximum and current size of the cache. +All cache classes accept an optional `missing` keyword argument in +their constructor, which can be used to provide a default or factory +function. If a key `key` is not present, the ``cache[key]`` operation +calls :meth:`Cache.__missing__`, which in turn calls `missing` with +`key` as argument. The cache will then store the object returned from +`missing(key)` as the new cache value for `key`, possibly discarding +other items if the cache is full. 
This may be used to easily provide +caching for existing single-argument functions, for example:: + + from cachetools import LRUCache + import urllib.request + + def get_pep(num): + """Retrieve text of a Python Enhancement Proposal""" + url = 'http://www.python.org/dev/peps/pep-%04d/' % num + with urllib.request.urlopen(url) as s: + return s.read() + + cache = LRUCache(maxsize=4, missing=get_pep) + + for n in 8, 9, 290, 308, 320, 8, 218, 320, 279, 289, 320, 9991: + try: + print(n, len(cache[n])) + except urllib.error.HTTPError: + print(n, 'Not Found') + print(sorted(cache.keys())) + + :class:`Cache` also features a :meth:`getsizeof` method, which returns the size of a given item. The default implementation of :meth:`getsizeof` returns :const:`1` irrespective of its `value` argument, making the cache's size equal to the number of its items, or -`len(cache)`. For convenience, all cache classes accept an optional +``len(cache)``. For convenience, all cache classes accept an optional named constructor parameter `getsizeof`, which may specify a function of one argument used to retrieve the size of an item's value. 
@@ -98,7 +126,7 @@ Standard Library :func:`functools.lru_cache` decorator:: import cachetools import urllib.request - @cachetools.lru_cache(maxsize=32) + @cachetools.lru_cache(maxsize=4) def get_pep(num): """Retrieve text of a Python Enhancement Proposal""" url = 'http://www.python.org/dev/peps/pep-%04d/' % num diff --git a/tests/__init__.py b/tests/__init__.py index 2a1c7c2..9d1e61d 100644 --- a/tests/__init__.py +++ b/tests/__init__.py @@ -1,6 +1,6 @@ class CacheTestMixin(object): - def cache(self, maxsize, getsizeof=None): + def cache(self, maxsize, missing=None, getsizeof=None): raise NotImplementedError def test_cache_defaults(self): @@ -67,6 +67,12 @@ class CacheTestMixin(object): self.assertNotIn(1, cache) self.assertNotIn(2, cache) + with self.assertRaises(KeyError): + del cache[1] + self.assertEqual(0, len(cache)) + self.assertNotIn(1, cache) + self.assertNotIn(2, cache) + def test_cache_pop(self): cache = self.cache(maxsize=2) @@ -99,6 +105,58 @@ class CacheTestMixin(object): with self.assertRaises(KeyError): cache.popitem() + def test_cache_missing(self): + cache = self.cache(maxsize=2, missing=lambda x: x) + + self.assertEqual(0, len(cache)) + self.assertEqual(1, cache[1]) + self.assertEqual(2, cache[2]) + self.assertEqual(2, len(cache)) + self.assertTrue(1 in cache and 2 in cache) + + self.assertEqual(3, cache[3]) + self.assertEqual(2, len(cache)) + self.assertTrue(3 in cache) + self.assertTrue(1 in cache or 2 in cache) + self.assertTrue(1 not in cache or 2 not in cache) + + self.assertEqual(4, cache[4]) + self.assertEqual(2, len(cache)) + self.assertTrue(4 in cache) + self.assertTrue(1 in cache or 2 in cache or 3 in cache) + + # verify __missing__() is *not* called for any operations + # besides __getitem__() + + self.assertEqual(4, cache.get(4)) + self.assertEqual(None, cache.get(5)) + self.assertEqual(5 * 5, cache.get(5, 5 * 5)) + self.assertEqual(2, len(cache)) + + self.assertEqual(4, cache.pop(4)) + with self.assertRaises(KeyError): + cache.pop(5) 
+ self.assertEqual(None, cache.pop(5, None)) + self.assertEqual(5 * 5, cache.pop(5, 5 * 5)) + self.assertEqual(1, len(cache)) + + cache.clear() + cache[1] = 1 + 1 + self.assertEqual(1 + 1, cache.setdefault(1)) + self.assertEqual(1 + 1, cache.setdefault(1, 1)) + self.assertEqual(1 + 1, cache[1]) + self.assertEqual(2 + 2, cache.setdefault(2, 2 + 2)) + self.assertEqual(2 + 2, cache.setdefault(2, None)) + self.assertEqual(2 + 2, cache.setdefault(2)) + self.assertEqual(2 + 2, cache[2]) + self.assertEqual(2, len(cache)) + self.assertTrue(1 in cache and 2 in cache) + self.assertEqual(None, cache.setdefault(3)) + self.assertEqual(2, len(cache)) + self.assertTrue(3 in cache) + self.assertTrue(1 in cache or 2 in cache) + self.assertTrue(1 not in cache or 2 not in cache) + def test_cache_getsizeof(self): cache = self.cache(maxsize=3, getsizeof=lambda x: x) self.assertEqual(3, cache.maxsize) diff --git a/tests/test_cache.py b/tests/test_cache.py index d68f676..433a733 100644 --- a/tests/test_cache.py +++ b/tests/test_cache.py @@ -6,5 +6,5 @@ from cachetools import Cache class CacheTest(unittest.TestCase, CacheTestMixin): - def cache(self, maxsize, getsizeof=None): - return Cache(maxsize, getsizeof=getsizeof) + def cache(self, maxsize, missing=None, getsizeof=None): + return Cache(maxsize, missing=missing, getsizeof=getsizeof) diff --git a/tests/test_lfucache.py b/tests/test_lfucache.py index c2e1bb7..3b69647 100644 --- a/tests/test_lfucache.py +++ b/tests/test_lfucache.py @@ -6,8 +6,8 @@ from cachetools import LFUCache, lfu_cache class LFUCacheTest(unittest.TestCase, CacheTestMixin, DecoratorTestMixin): - def cache(self, maxsize, getsizeof=None): - return LFUCache(maxsize, getsizeof) + def cache(self, maxsize, missing=None, getsizeof=None): + return LFUCache(maxsize, missing=missing, getsizeof=getsizeof) def decorator(self, maxsize, typed=False, lock=None): return lfu_cache(maxsize, typed=typed, lock=lock) diff --git a/tests/test_lrucache.py b/tests/test_lrucache.py index 
bfcb9d7..f21170d 100644 --- a/tests/test_lrucache.py +++ b/tests/test_lrucache.py @@ -6,8 +6,8 @@ from cachetools import LRUCache, lru_cache class LRUCacheTest(unittest.TestCase, CacheTestMixin, DecoratorTestMixin): - def cache(self, maxsize, getsizeof=None): - return LRUCache(maxsize, getsizeof) + def cache(self, maxsize, missing=None, getsizeof=None): + return LRUCache(maxsize, missing=missing, getsizeof=getsizeof) def decorator(self, maxsize, typed=False, lock=None): return lru_cache(maxsize, typed=typed, lock=lock) diff --git a/tests/test_rrcache.py b/tests/test_rrcache.py index 7713e5a..5a40b7a 100644 --- a/tests/test_rrcache.py +++ b/tests/test_rrcache.py @@ -7,8 +7,8 @@ from cachetools import RRCache, rr_cache class RRCacheTest(unittest.TestCase, CacheTestMixin, DecoratorTestMixin): - def cache(self, maxsize, choice=random.choice, getsizeof=None): - return RRCache(maxsize, choice=choice, getsizeof=getsizeof) + def cache(self, maxsize, choice=random.choice, missing=None, getsizeof=None): + return RRCache(maxsize, choice=choice, missing=missing, getsizeof=getsizeof) def decorator(self, maxsize, choice=random.choice, typed=False, lock=None): return rr_cache(maxsize, choice=choice, typed=typed, lock=lock) diff --git a/tests/test_ttlcache.py b/tests/test_ttlcache.py index ef36446..35227b2 100644 --- a/tests/test_ttlcache.py +++ b/tests/test_ttlcache.py @@ -17,8 +17,8 @@ class Timer: class TTLCacheTest(unittest.TestCase, CacheTestMixin, DecoratorTestMixin): - def cache(self, maxsize, ttl=0, getsizeof=None): - return TTLCache(maxsize, ttl, timer=Timer(), getsizeof=getsizeof) + def cache(self, maxsize, ttl=0, missing=None, getsizeof=None): + return TTLCache(maxsize, ttl, timer=Timer(), missing=missing, getsizeof=getsizeof) def decorator(self, maxsize, ttl=0, typed=False, lock=None): return ttl_cache(maxsize, ttl, timer=Timer(), typed=typed, lock=lock) -- cgit v1.2.3 From 08a83b3d3f80ef98ec1b563680955afdeff0006a Mon Sep 17 00:00:00 2001 From: Thomas Kemmer Date: Wed, 
3 Dec 2014 12:07:44 +0100 Subject: Update CHANGES.rst --- CHANGES.rst | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/CHANGES.rst b/CHANGES.rst index 62f7279..0b590dc 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -1,3 +1,10 @@ +0.8.0 UNRELEASED + +---------------- + +- Add ``Cache.__missing__()``. + + 0.7.1 2014-11-22 ---------------- -- cgit v1.2.3 From 98e32c43f1b50ec9981f908260d1bc787068d727 Mon Sep 17 00:00:00 2001 From: Thomas Kemmer Date: Wed, 3 Dec 2014 12:14:36 +0100 Subject: Fix #13: Add Cache.getsize() --- CHANGES.rst | 2 ++ cachetools/cache.py | 8 ++++---- cachetools/ttlcache.py | 3 +-- 3 files changed, 7 insertions(+), 6 deletions(-) diff --git a/CHANGES.rst b/CHANGES.rst index 0b590dc..c277dc3 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -4,6 +4,8 @@ - Add ``Cache.__missing__()``. +- Add ``Cache.getsize()``. + 0.7.1 2014-11-22 ---------------- diff --git a/cachetools/cache.py b/cachetools/cache.py index e600f5d..e26bd5b 100644 --- a/cachetools/cache.py +++ b/cachetools/cache.py @@ -90,14 +90,14 @@ class Cache(collections.MutableMapping): """Return the current size of the cache.""" return self.__currsize + def getsize(self, key): + """Return the size of the cache item with key `key`.""" + return self.__data[key][1] + def getsizeof(self, value): """Return the size of a cache element.""" return self.__getsizeof(value) - def _getitemsize(self, key): - # TODO: decide on interface, make public - return self.__data[key][1] - # collections.MutableMapping mixin methods do not handle __missing__ def get(self, key, default=None): diff --git a/cachetools/ttlcache.py b/cachetools/ttlcache.py index e348965..111ac05 100644 --- a/cachetools/ttlcache.py +++ b/cachetools/ttlcache.py @@ -175,13 +175,12 @@ class TTLCache(Cache): @property def currsize(self): - getsize = Cache._getitemsize # TODO: decide on final interface expired = 0 time = self.__timer() root = self.__root head = root.ttl_next while head is not root and head.expire < time: - expired += 
getsize(self, head.key) + expired += self.getsize(head.key) head = head.ttl_next return super(TTLCache, self).currsize - expired -- cgit v1.2.3 From a4f3125a837ebbcb1cc1e99351db0e1b1b7e6445 Mon Sep 17 00:00:00 2001 From: Thomas Kemmer Date: Wed, 3 Dec 2014 12:16:47 +0100 Subject: Rename cache implementation source files. --- cachetools/__init__.py | 12 +-- cachetools/lfu.py | 49 ++++++++++++ cachetools/lfucache.py | 49 ------------ cachetools/lru.py | 100 ++++++++++++++++++++++++ cachetools/lrucache.py | 100 ------------------------ cachetools/rr.py | 41 ++++++++++ cachetools/rrcache.py | 41 ---------- cachetools/ttl.py | 204 +++++++++++++++++++++++++++++++++++++++++++++++++ cachetools/ttlcache.py | 204 ------------------------------------------------- 9 files changed, 400 insertions(+), 400 deletions(-) create mode 100644 cachetools/lfu.py delete mode 100644 cachetools/lfucache.py create mode 100644 cachetools/lru.py delete mode 100644 cachetools/lrucache.py create mode 100644 cachetools/rr.py delete mode 100644 cachetools/rrcache.py create mode 100644 cachetools/ttl.py delete mode 100644 cachetools/ttlcache.py diff --git a/cachetools/__init__.py b/cachetools/__init__.py index b3d8d21..32248f4 100644 --- a/cachetools/__init__.py +++ b/cachetools/__init__.py @@ -1,17 +1,17 @@ """Extensible memoizing collections and decorators""" from .cache import Cache -from .rrcache import RRCache, rr_cache -from .lfucache import LFUCache, lfu_cache -from .lrucache import LRUCache, lru_cache -from .ttlcache import TTLCache, ttl_cache from .decorators import cachedmethod +from .lfu import LFUCache, lfu_cache +from .lru import LRUCache, lru_cache +from .rr import RRCache, rr_cache +from .ttl import TTLCache, ttl_cache __all__ = ( 'Cache', - 'RRCache', 'LFUCache', 'LRUCache', 'TTLCache', - 'rr_cache', 'lfu_cache', 'lru_cache', 'ttl_cache', 'cachedmethod' + 'LFUCache', 'LRUCache', 'RRCache', 'TTLCache', + 'lfu_cache', 'lru_cache', 'rr_cache', 'ttl_cache', ) __version__ = '0.8.0alpha' 
diff --git a/cachetools/lfu.py b/cachetools/lfu.py new file mode 100644 index 0000000..b24094a --- /dev/null +++ b/cachetools/lfu.py @@ -0,0 +1,49 @@ +from .cache import Cache +from .decorators import cachedfunc +from .lock import RLock + +import collections +import operator + + +class LFUCache(Cache): + """Least Frequently Used (LFU) cache implementation. + + This class counts how often an item is retrieved, and discards the + items used least often to make space when necessary. + + """ + + def __init__(self, maxsize, missing=None, getsizeof=None): + Cache.__init__(self, maxsize, missing=missing, getsizeof=getsizeof) + self.__counter = collections.Counter() + + def __getitem__(self, key, cache_getitem=Cache.__getitem__): + value = cache_getitem(self, key) + self.__counter[key] += 1 + return value + + def __setitem__(self, key, value, cache_setitem=Cache.__setitem__): + cache_setitem(self, key, value) + self.__counter[key] += 1 + + def __delitem__(self, key, cache_delitem=Cache.__delitem__): + cache_delitem(self, key) + del self.__counter[key] + + def popitem(self): + """Remove and return the `(key, value)` pair least frequently used.""" + try: + key = min(self.__counter.items(), key=operator.itemgetter(1))[0] + except ValueError: + raise KeyError('cache is empty') + return key, self.pop(key) + + +def lfu_cache(maxsize=128, typed=False, getsizeof=None, lock=RLock): + """Decorator to wrap a function with a memoizing callable that saves + up to `maxsize` results based on a Least Frequently Used (LFU) + algorithm. + + """ + return cachedfunc(LFUCache(maxsize, getsizeof), typed, lock) diff --git a/cachetools/lfucache.py b/cachetools/lfucache.py deleted file mode 100644 index b24094a..0000000 --- a/cachetools/lfucache.py +++ /dev/null @@ -1,49 +0,0 @@ -from .cache import Cache -from .decorators import cachedfunc -from .lock import RLock - -import collections -import operator - - -class LFUCache(Cache): - """Least Frequently Used (LFU) cache implementation. 
- - This class counts how often an item is retrieved, and discards the - items used least often to make space when necessary. - - """ - - def __init__(self, maxsize, missing=None, getsizeof=None): - Cache.__init__(self, maxsize, missing=missing, getsizeof=getsizeof) - self.__counter = collections.Counter() - - def __getitem__(self, key, cache_getitem=Cache.__getitem__): - value = cache_getitem(self, key) - self.__counter[key] += 1 - return value - - def __setitem__(self, key, value, cache_setitem=Cache.__setitem__): - cache_setitem(self, key, value) - self.__counter[key] += 1 - - def __delitem__(self, key, cache_delitem=Cache.__delitem__): - cache_delitem(self, key) - del self.__counter[key] - - def popitem(self): - """Remove and return the `(key, value)` pair least frequently used.""" - try: - key = min(self.__counter.items(), key=operator.itemgetter(1))[0] - except ValueError: - raise KeyError('cache is empty') - return key, self.pop(key) - - -def lfu_cache(maxsize=128, typed=False, getsizeof=None, lock=RLock): - """Decorator to wrap a function with a memoizing callable that saves - up to `maxsize` results based on a Least Frequently Used (LFU) - algorithm. - - """ - return cachedfunc(LFUCache(maxsize, getsizeof), typed, lock) diff --git a/cachetools/lru.py b/cachetools/lru.py new file mode 100644 index 0000000..f697ce3 --- /dev/null +++ b/cachetools/lru.py @@ -0,0 +1,100 @@ +from .cache import Cache +from .decorators import cachedfunc +from .lock import RLock + + +class Link(object): + + __slots__ = 'key', 'value', 'prev', 'next' + + def unlink(self): + next = self.next + prev = self.prev + prev.next = next + next.prev = prev + + +class LRUCache(Cache): + """Least Recently Used (LRU) cache implementation. + + This class discards the least recently used items first to make + space when necessary. 
+ + """ + + def __init__(self, maxsize, missing=None, getsizeof=None): + if getsizeof is not None: + Cache.__init__(self, maxsize, missing=missing, getsizeof=lambda link: getsizeof(link.value)) + self.getsizeof = getsizeof + else: + Cache.__init__(self, maxsize, missing=missing) + self.__root = root = Link() + root.prev = root.next = root + + def __repr__(self, cache_getitem=Cache.__getitem__): + # prevent item reordering + return '%s(%r, maxsize=%d, currsize=%d)' % ( + self.__class__.__name__, + [(key, cache_getitem(self, key).value) for key in self], + self.maxsize, + self.currsize, + ) + + def __getitem__(self, key, cache_getitem=Cache.__getitem__): + link = cache_getitem(self, key) + next = link.next + prev = link.prev + prev.next = next + next.prev = prev + link.next = root = self.__root + link.prev = tail = root.prev + tail.next = root.prev = link + return link.value + + def __setitem__(self, key, value, + cache_contains=Cache.__contains__, + cache_getitem=Cache.__getitem__, + cache_setitem=Cache.__setitem__): + if cache_contains(self, key): + oldlink = cache_getitem(self, key) + else: + oldlink = None + link = Link() + link.key = key + link.value = value + cache_setitem(self, key, link) + if oldlink: + oldlink.unlink() + link.next = root = self.__root + link.prev = tail = root.prev + tail.next = root.prev = link + + def __delitem__(self, key, + cache_contains=Cache.__contains__, + cache_getitem=Cache.__getitem__, + cache_delitem=Cache.__delitem__): + if not cache_contains(self, key): + raise KeyError(key) + link = cache_getitem(self, key) + cache_delitem(self, key) + link.unlink() + + def popitem(self): + """Remove and return the `(key, value)` pair least recently used.""" + root = self.__root + link = root.next + if link is root: + raise KeyError('cache is empty') + key = link.key + Cache.__delitem__(self, key) + link.unlink() + return (key, link.value) + + +def lru_cache(maxsize=128, typed=False, getsizeof=None, lock=RLock): + """Decorator to wrap a 
function with a memoizing callable that saves + up to `maxsize` results based on a Least Recently Used (LRU) + algorithm. + + """ + return cachedfunc(LRUCache(maxsize, getsizeof), typed, lock) diff --git a/cachetools/lrucache.py b/cachetools/lrucache.py deleted file mode 100644 index f697ce3..0000000 --- a/cachetools/lrucache.py +++ /dev/null @@ -1,100 +0,0 @@ -from .cache import Cache -from .decorators import cachedfunc -from .lock import RLock - - -class Link(object): - - __slots__ = 'key', 'value', 'prev', 'next' - - def unlink(self): - next = self.next - prev = self.prev - prev.next = next - next.prev = prev - - -class LRUCache(Cache): - """Least Recently Used (LRU) cache implementation. - - This class discards the least recently used items first to make - space when necessary. - - """ - - def __init__(self, maxsize, missing=None, getsizeof=None): - if getsizeof is not None: - Cache.__init__(self, maxsize, missing=missing, getsizeof=lambda link: getsizeof(link.value)) - self.getsizeof = getsizeof - else: - Cache.__init__(self, maxsize, missing=missing) - self.__root = root = Link() - root.prev = root.next = root - - def __repr__(self, cache_getitem=Cache.__getitem__): - # prevent item reordering - return '%s(%r, maxsize=%d, currsize=%d)' % ( - self.__class__.__name__, - [(key, cache_getitem(self, key).value) for key in self], - self.maxsize, - self.currsize, - ) - - def __getitem__(self, key, cache_getitem=Cache.__getitem__): - link = cache_getitem(self, key) - next = link.next - prev = link.prev - prev.next = next - next.prev = prev - link.next = root = self.__root - link.prev = tail = root.prev - tail.next = root.prev = link - return link.value - - def __setitem__(self, key, value, - cache_contains=Cache.__contains__, - cache_getitem=Cache.__getitem__, - cache_setitem=Cache.__setitem__): - if cache_contains(self, key): - oldlink = cache_getitem(self, key) - else: - oldlink = None - link = Link() - link.key = key - link.value = value - cache_setitem(self, key, 
link) - if oldlink: - oldlink.unlink() - link.next = root = self.__root - link.prev = tail = root.prev - tail.next = root.prev = link - - def __delitem__(self, key, - cache_contains=Cache.__contains__, - cache_getitem=Cache.__getitem__, - cache_delitem=Cache.__delitem__): - if not cache_contains(self, key): - raise KeyError(key) - link = cache_getitem(self, key) - cache_delitem(self, key) - link.unlink() - - def popitem(self): - """Remove and return the `(key, value)` pair least recently used.""" - root = self.__root - link = root.next - if link is root: - raise KeyError('cache is empty') - key = link.key - Cache.__delitem__(self, key) - link.unlink() - return (key, link.value) - - -def lru_cache(maxsize=128, typed=False, getsizeof=None, lock=RLock): - """Decorator to wrap a function with a memoizing callable that saves - up to `maxsize` results based on a Least Recently Used (LRU) - algorithm. - - """ - return cachedfunc(LRUCache(maxsize, getsizeof), typed, lock) diff --git a/cachetools/rr.py b/cachetools/rr.py new file mode 100644 index 0000000..f1badb9 --- /dev/null +++ b/cachetools/rr.py @@ -0,0 +1,41 @@ +from .cache import Cache +from .decorators import cachedfunc +from .lock import RLock + +import random + + +class RRCache(Cache): + """Random Replacement (RR) cache implementation. + + This class randomly selects candidate items and discards them to + make space when necessary. + + By default, items are selected from the list of cache keys using + :func:`random.choice`. The optional argument `choice` may specify + an alternative function that returns an arbitrary element from a + non-empty sequence. 
+ + """ + + def __init__(self, maxsize, choice=random.choice, missing=None, getsizeof=None): + Cache.__init__(self, maxsize, missing=missing, getsizeof=getsizeof) + self.__choice = choice + + def popitem(self): + """Remove and return a random `(key, value)` pair.""" + try: + key = self.__choice(list(self)) + except IndexError: + raise KeyError('cache is empty') + return (key, self.pop(key)) + + +def rr_cache(maxsize=128, choice=random.choice, typed=False, getsizeof=None, + lock=RLock): + """Decorator to wrap a function with a memoizing callable that saves + up to `maxsize` results based on a Random Replacement (RR) + algorithm. + + """ + return cachedfunc(RRCache(maxsize, choice, getsizeof), typed, lock) diff --git a/cachetools/rrcache.py b/cachetools/rrcache.py deleted file mode 100644 index f1badb9..0000000 --- a/cachetools/rrcache.py +++ /dev/null @@ -1,41 +0,0 @@ -from .cache import Cache -from .decorators import cachedfunc -from .lock import RLock - -import random - - -class RRCache(Cache): - """Random Replacement (RR) cache implementation. - - This class randomly selects candidate items and discards them to - make space when necessary. - - By default, items are selected from the list of cache keys using - :func:`random.choice`. The optional argument `choice` may specify - an alternative function that returns an arbitrary element from a - non-empty sequence. 
- - """ - - def __init__(self, maxsize, choice=random.choice, missing=None, getsizeof=None): - Cache.__init__(self, maxsize, missing=missing, getsizeof=getsizeof) - self.__choice = choice - - def popitem(self): - """Remove and return a random `(key, value)` pair.""" - try: - key = self.__choice(list(self)) - except IndexError: - raise KeyError('cache is empty') - return (key, self.pop(key)) - - -def rr_cache(maxsize=128, choice=random.choice, typed=False, getsizeof=None, - lock=RLock): - """Decorator to wrap a function with a memoizing callable that saves - up to `maxsize` results based on a Random Replacement (RR) - algorithm. - - """ - return cachedfunc(RRCache(maxsize, choice, getsizeof), typed, lock) diff --git a/cachetools/ttl.py b/cachetools/ttl.py new file mode 100644 index 0000000..111ac05 --- /dev/null +++ b/cachetools/ttl.py @@ -0,0 +1,204 @@ +from .cache import Cache +from .decorators import cachedfunc +from .lock import RLock + +import time + + +class Link(object): + + __slots__ = ( + 'key', 'value', 'expire', + 'ttl_prev', 'ttl_next', + 'lru_prev', 'lru_next' + ) + + def unlink(self): + ttl_next = self.ttl_next + ttl_prev = self.ttl_prev + ttl_prev.ttl_next = ttl_next + ttl_next.ttl_prev = ttl_prev + + lru_next = self.lru_next + lru_prev = self.lru_prev + lru_prev.lru_next = lru_next + lru_next.lru_prev = lru_prev + + +class TTLCache(Cache): + """LRU Cache implementation with per-item time-to-live (TTL) value. + + This class associates a time-to-live value with each item. Items + that expire because they have exceeded their time-to-live will be + removed. If no expired items are there to remove, the least + recently used items will be discarded first to make space when + necessary. Trying to access an expired item will raise a + :exc:`KeyError`. + + By default, the time-to-live is specified in seconds, and the + standard :func:`time.time` function is used to retrieve the + current time. A custom `timer` function can be supplied if + needed. 
+ + """ + + ExpiredError = KeyError # deprecated + + def __init__(self, maxsize, ttl, timer=time.time, missing=None, getsizeof=None): + if getsizeof is None: + Cache.__init__(self, maxsize, missing=missing) + else: + Cache.__init__(self, maxsize, missing=missing, getsizeof=lambda e: getsizeof(e.value)) + self.getsizeof = getsizeof + self.__root = root = Link() + root.ttl_prev = root.ttl_next = root + root.lru_prev = root.lru_next = root + self.__timer = timer + self.__ttl = ttl + + def __repr__(self, cache_getitem=Cache.__getitem__): + # prevent item reordering/expiration + return '%s(%r, maxsize=%d, currsize=%d)' % ( + self.__class__.__name__, + [(key, cache_getitem(self, key).value) for key in self], + self.maxsize, + self.currsize, + ) + + def __getitem__(self, key, + cache_getitem=Cache.__getitem__, + cache_missing=Cache.__missing__): + link = cache_getitem(self, key) + if link.expire < self.__timer(): + return cache_missing(self, key).value + next = link.lru_next + prev = link.lru_prev + prev.lru_next = next + next.lru_prev = prev + link.lru_next = root = self.__root + link.lru_prev = tail = root.lru_prev + tail.lru_next = root.lru_prev = link + return link.value + + def __setitem__(self, key, value, + cache_contains=Cache.__contains__, + cache_getitem=Cache.__getitem__, + cache_setitem=Cache.__setitem__): + time = self.__timer() + self.expire(time) + if cache_contains(self, key): + oldlink = cache_getitem(self, key) + else: + oldlink = None + link = Link() + link.key = key + link.value = value + link.expire = time + self.__ttl + cache_setitem(self, key, link) + if oldlink: + oldlink.unlink() + link.ttl_next = root = self.__root + link.ttl_prev = tail = root.ttl_prev + tail.ttl_next = root.ttl_prev = link + link.lru_next = root + link.lru_prev = tail = root.lru_prev + tail.lru_next = root.lru_prev = link + + def __delitem__(self, key, + cache_contains=Cache.__contains__, + cache_getitem=Cache.__getitem__, + cache_delitem=Cache.__delitem__): + if not 
cache_contains(self, key): + raise KeyError(key) + link = cache_getitem(self, key) + cache_delitem(self, key) + link.unlink() + self.expire() + + def __contains__(self, key, + cache_contains=Cache.__contains__, + cache_getitem=Cache.__getitem__): + if not cache_contains(self, key): + return False + elif cache_getitem(self, key).expire < self.__timer(): + return False + else: + return True + + def __iter__(self): + timer = self.__timer + root = self.__root + curr = root.ttl_next + while curr is not root: + if not (curr.expire < timer()): + yield curr.key + curr = curr.ttl_next + + def __len__(self, cache_len=Cache.__len__): + expired = 0 + time = self.__timer() + root = self.__root + head = root.ttl_next + while head is not root and head.expire < time: + expired += 1 + head = head.ttl_next + return cache_len(self) - expired + + def expire(self, time=None): + """Remove expired items from the cache. + + If `time` is not :const:`None`, remove all items whose + time-to-live would have expired by `time`. 
+ + """ + if time is None: + time = self.__timer() + root = self.__root + head = root.ttl_next + cache_delitem = Cache.__delitem__ + while head is not root and head.expire < time: + cache_delitem(self, head.key) + next = head.ttl_next + head.unlink() + head = next + + def popitem(self): + """Remove and return the `(key, value)` pair least recently used.""" + root = self.__root + link = root.lru_next + if link is root: + raise KeyError('cache is empty') + key = link.key + Cache.__delitem__(self, key) + link.unlink() + return (key, link.value) + + @property + def currsize(self): + expired = 0 + time = self.__timer() + root = self.__root + head = root.ttl_next + while head is not root and head.expire < time: + expired += self.getsize(head.key) + head = head.ttl_next + return super(TTLCache, self).currsize - expired + + @property + def timer(self): + """Return the timer used by the cache.""" + return self.__timer + + @property + def ttl(self): + """Return the time-to-live of the cache.""" + return self.__ttl + + +def ttl_cache(maxsize=128, ttl=600, timer=time.time, typed=False, + getsizeof=None, lock=RLock): + """Decorator to wrap a function with a memoizing callable that saves + up to `maxsize` results based on a Least Recently Used (LRU) + algorithm with a per-item time-to-live (TTL) value. 
+ """ + return cachedfunc(TTLCache(maxsize, ttl, timer, getsizeof), typed, lock) diff --git a/cachetools/ttlcache.py b/cachetools/ttlcache.py deleted file mode 100644 index 111ac05..0000000 --- a/cachetools/ttlcache.py +++ /dev/null @@ -1,204 +0,0 @@ -from .cache import Cache -from .decorators import cachedfunc -from .lock import RLock - -import time - - -class Link(object): - - __slots__ = ( - 'key', 'value', 'expire', - 'ttl_prev', 'ttl_next', - 'lru_prev', 'lru_next' - ) - - def unlink(self): - ttl_next = self.ttl_next - ttl_prev = self.ttl_prev - ttl_prev.ttl_next = ttl_next - ttl_next.ttl_prev = ttl_prev - - lru_next = self.lru_next - lru_prev = self.lru_prev - lru_prev.lru_next = lru_next - lru_next.lru_prev = lru_prev - - -class TTLCache(Cache): - """LRU Cache implementation with per-item time-to-live (TTL) value. - - This class associates a time-to-live value with each item. Items - that expire because they have exceeded their time-to-live will be - removed. If no expired items are there to remove, the least - recently used items will be discarded first to make space when - necessary. Trying to access an expired item will raise a - :exc:`KeyError`. - - By default, the time-to-live is specified in seconds, and the - standard :func:`time.time` function is used to retrieve the - current time. A custom `timer` function can be supplied if - needed. 
- - """ - - ExpiredError = KeyError # deprecated - - def __init__(self, maxsize, ttl, timer=time.time, missing=None, getsizeof=None): - if getsizeof is None: - Cache.__init__(self, maxsize, missing=missing) - else: - Cache.__init__(self, maxsize, missing=missing, getsizeof=lambda e: getsizeof(e.value)) - self.getsizeof = getsizeof - self.__root = root = Link() - root.ttl_prev = root.ttl_next = root - root.lru_prev = root.lru_next = root - self.__timer = timer - self.__ttl = ttl - - def __repr__(self, cache_getitem=Cache.__getitem__): - # prevent item reordering/expiration - return '%s(%r, maxsize=%d, currsize=%d)' % ( - self.__class__.__name__, - [(key, cache_getitem(self, key).value) for key in self], - self.maxsize, - self.currsize, - ) - - def __getitem__(self, key, - cache_getitem=Cache.__getitem__, - cache_missing=Cache.__missing__): - link = cache_getitem(self, key) - if link.expire < self.__timer(): - return cache_missing(self, key).value - next = link.lru_next - prev = link.lru_prev - prev.lru_next = next - next.lru_prev = prev - link.lru_next = root = self.__root - link.lru_prev = tail = root.lru_prev - tail.lru_next = root.lru_prev = link - return link.value - - def __setitem__(self, key, value, - cache_contains=Cache.__contains__, - cache_getitem=Cache.__getitem__, - cache_setitem=Cache.__setitem__): - time = self.__timer() - self.expire(time) - if cache_contains(self, key): - oldlink = cache_getitem(self, key) - else: - oldlink = None - link = Link() - link.key = key - link.value = value - link.expire = time + self.__ttl - cache_setitem(self, key, link) - if oldlink: - oldlink.unlink() - link.ttl_next = root = self.__root - link.ttl_prev = tail = root.ttl_prev - tail.ttl_next = root.ttl_prev = link - link.lru_next = root - link.lru_prev = tail = root.lru_prev - tail.lru_next = root.lru_prev = link - - def __delitem__(self, key, - cache_contains=Cache.__contains__, - cache_getitem=Cache.__getitem__, - cache_delitem=Cache.__delitem__): - if not 
cache_contains(self, key): - raise KeyError(key) - link = cache_getitem(self, key) - cache_delitem(self, key) - link.unlink() - self.expire() - - def __contains__(self, key, - cache_contains=Cache.__contains__, - cache_getitem=Cache.__getitem__): - if not cache_contains(self, key): - return False - elif cache_getitem(self, key).expire < self.__timer(): - return False - else: - return True - - def __iter__(self): - timer = self.__timer - root = self.__root - curr = root.ttl_next - while curr is not root: - if not (curr.expire < timer()): - yield curr.key - curr = curr.ttl_next - - def __len__(self, cache_len=Cache.__len__): - expired = 0 - time = self.__timer() - root = self.__root - head = root.ttl_next - while head is not root and head.expire < time: - expired += 1 - head = head.ttl_next - return cache_len(self) - expired - - def expire(self, time=None): - """Remove expired items from the cache. - - If `time` is not :const:`None`, remove all items whose - time-to-live would have expired by `time`. 
- - """ - if time is None: - time = self.__timer() - root = self.__root - head = root.ttl_next - cache_delitem = Cache.__delitem__ - while head is not root and head.expire < time: - cache_delitem(self, head.key) - next = head.ttl_next - head.unlink() - head = next - - def popitem(self): - """Remove and return the `(key, value)` pair least recently used.""" - root = self.__root - link = root.lru_next - if link is root: - raise KeyError('cache is empty') - key = link.key - Cache.__delitem__(self, key) - link.unlink() - return (key, link.value) - - @property - def currsize(self): - expired = 0 - time = self.__timer() - root = self.__root - head = root.ttl_next - while head is not root and head.expire < time: - expired += self.getsize(head.key) - head = head.ttl_next - return super(TTLCache, self).currsize - expired - - @property - def timer(self): - """Return the timer used by the cache.""" - return self.__timer - - @property - def ttl(self): - """Return the time-to-live of the cache.""" - return self.__ttl - - -def ttl_cache(maxsize=128, ttl=600, timer=time.time, typed=False, - getsizeof=None, lock=RLock): - """Decorator to wrap a function with a memoizing callable that saves - up to `maxsize` results based on a Least Recently Used (LRU) - algorithm with a per-item time-to-live (TTL) value. - """ - return cachedfunc(TTLCache(maxsize, ttl, timer, getsizeof), typed, lock) -- cgit v1.2.3 From 5f8051938a6a222b43c1cf2a9499f8c05f9ebafe Mon Sep 17 00:00:00 2001 From: Thomas Kemmer Date: Wed, 3 Dec 2014 13:07:43 +0100 Subject: Fix #27: Ignore ValueError in decorators. 
--- CHANGES.rst | 3 ++- cachetools/decorators.py | 10 ++++++++-- docs/index.rst | 2 +- setup.cfg | 2 +- tests/__init__.py | 11 +++++++++++ tests/test_cachedmethod.py | 10 ++++++++++ 6 files changed, 33 insertions(+), 5 deletions(-) diff --git a/CHANGES.rst b/CHANGES.rst index c277dc3..25a2c90 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -1,11 +1,12 @@ 0.8.0 UNRELEASED - ---------------- - Add ``Cache.__missing__()``. - Add ``Cache.getsize()``. +- Ignore ``ValueError`` from cache insertion in decorators. + 0.7.1 2014-11-22 ---------------- diff --git a/cachetools/decorators.py b/cachetools/decorators.py index 44ffcb5..7299251 100644 --- a/cachetools/decorators.py +++ b/cachetools/decorators.py @@ -43,7 +43,10 @@ def cachedfunc(cache, typed=False, lock=None): stats[1] += 1 result = func(*args, **kwargs) with context: - cache[key] = result + try: + cache[key] = result + except ValueError: + pass # value too large return result def cache_info(): @@ -82,7 +85,10 @@ def cachedmethod(cache, typed=False): except KeyError: pass result = method(self, *args, **kwargs) - mapping[key] = result + try: + mapping[key] = result + except ValueError: + pass # value too large return result wrapper.cache = cache diff --git a/docs/index.rst b/docs/index.rst index 9041ce9..ddf68d7 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -161,7 +161,7 @@ The wrapped function is instrumented with :func:`cache_info` and performance and clear the cache. See the :func:`functools.lru_cache` documentation for details. -Unlike :func:`functools.lru_cache`, setting `maxsize` to zero or +Note that unlike :func:`functools.lru_cache`, setting `maxsize` to :const:`None` is not supported. .. 
decorator:: lfu_cache(maxsize=128, typed=False, getsizeof=None, lock=threading.RLock) diff --git a/setup.cfg b/setup.cfg index 581f1ba..8d5530a 100644 --- a/setup.cfg +++ b/setup.cfg @@ -2,7 +2,7 @@ universal = 1 [flake8] -exclude = .git,docs,setup.py +exclude = .git,build,docs,setup.py [nosetests] with-coverage = 1 diff --git a/tests/__init__.py b/tests/__init__.py index 9d1e61d..2026b27 100644 --- a/tests/__init__.py +++ b/tests/__init__.py @@ -224,6 +224,7 @@ class DecoratorTestMixin(object): def test_typed_decorator(self): cached = self.decorator(maxsize=2, typed=True)(lambda n: n) + self.assertEqual(cached(1), 1) self.assertEqual(cached.cache_info(), (0, 1, 2, 1)) self.assertEqual(cached(1), 1) @@ -232,3 +233,13 @@ class DecoratorTestMixin(object): self.assertEqual(cached.cache_info(), (1, 2, 2, 2)) self.assertEqual(cached(1.0), 1.0) self.assertEqual(cached.cache_info(), (2, 2, 2, 2)) + + def test_nocache_decorator(self): + cached = self.decorator(maxsize=0)(lambda n: n) + + self.assertEqual(cached(1), 1) + self.assertEqual(cached.cache_info(), (0, 1, 0, 0)) + self.assertEqual(cached(1), 1) + self.assertEqual(cached.cache_info(), (0, 2, 0, 0)) + self.assertEqual(cached(1.0), 1.0) + self.assertEqual(cached.cache_info(), (0, 3, 0, 0)) diff --git a/tests/test_cachedmethod.py b/tests/test_cachedmethod.py index 44a2f2f..bf4dac2 100644 --- a/tests/test_cachedmethod.py +++ b/tests/test_cachedmethod.py @@ -60,3 +60,13 @@ class CachedMethodTest(unittest.TestCase): self.assertEqual(cached.get(1), 2) self.assertEqual(cached.get(1.0), 3) self.assertEqual(cached.get(1.0), 4) + + def test_decorator_toolarge(self): + cached = Cached(LRUCache(maxsize=0)) + self.assertEqual(cached.cache, cached.get.cache(cached)) + + self.assertEqual(cached.get(0), 0) + self.assertEqual(cached.get(1), 1) + self.assertEqual(cached.get(1), 2) + self.assertEqual(cached.get(1.0), 3) + self.assertEqual(cached.get(1.0), 4) -- cgit v1.2.3 From bf2de581be97a236fad2913b34473b70d5b0aebc Mon Sep 17 
00:00:00 2001 From: Thomas Kemmer Date: Wed, 3 Dec 2014 18:37:35 +0100 Subject: Prepare v0.8.0 --- CHANGES.rst | 6 +++--- cachetools/__init__.py | 4 ++-- cachetools/lru.py | 3 ++- cachetools/rr.py | 3 ++- cachetools/ttl.py | 12 +++++++----- tests/test_rrcache.py | 6 ++++-- tests/test_ttlcache.py | 3 ++- 7 files changed, 22 insertions(+), 15 deletions(-) diff --git a/CHANGES.rst b/CHANGES.rst index 25a2c90..becdfa6 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -1,11 +1,11 @@ -0.8.0 UNRELEASED +0.8.0 2014-12-03 ---------------- -- Add ``Cache.__missing__()``. +- Ignore ``ValueError`` raised on cache insertion in decorators. - Add ``Cache.getsize()``. -- Ignore ``ValueError`` from cache insertion in decorators. +- Add ``Cache.__missing__()``. 0.7.1 2014-11-22 diff --git a/cachetools/__init__.py b/cachetools/__init__.py index 32248f4..981209a 100644 --- a/cachetools/__init__.py +++ b/cachetools/__init__.py @@ -9,9 +9,9 @@ from .ttl import TTLCache, ttl_cache __all__ = ( 'Cache', - 'cachedmethod' + 'cachedmethod', 'LFUCache', 'LRUCache', 'RRCache', 'TTLCache', 'lfu_cache', 'lru_cache', 'rr_cache', 'ttl_cache', ) -__version__ = '0.8.0alpha' +__version__ = '0.8.0' diff --git a/cachetools/lru.py b/cachetools/lru.py index f697ce3..f2d8ac7 100644 --- a/cachetools/lru.py +++ b/cachetools/lru.py @@ -24,7 +24,8 @@ class LRUCache(Cache): def __init__(self, maxsize, missing=None, getsizeof=None): if getsizeof is not None: - Cache.__init__(self, maxsize, missing=missing, getsizeof=lambda link: getsizeof(link.value)) + linksize = lambda link: getsizeof(link.value) + Cache.__init__(self, maxsize, missing=missing, getsizeof=linksize) self.getsizeof = getsizeof else: Cache.__init__(self, maxsize, missing=missing) diff --git a/cachetools/rr.py b/cachetools/rr.py index f1badb9..3d72bb0 100644 --- a/cachetools/rr.py +++ b/cachetools/rr.py @@ -18,7 +18,8 @@ class RRCache(Cache): """ - def __init__(self, maxsize, choice=random.choice, missing=None, getsizeof=None): + def __init__(self, 
maxsize, choice=random.choice, missing=None, + getsizeof=None): Cache.__init__(self, maxsize, missing=missing, getsizeof=getsizeof) self.__choice = choice diff --git a/cachetools/ttl.py b/cachetools/ttl.py index 111ac05..a90af98 100644 --- a/cachetools/ttl.py +++ b/cachetools/ttl.py @@ -44,12 +44,14 @@ class TTLCache(Cache): ExpiredError = KeyError # deprecated - def __init__(self, maxsize, ttl, timer=time.time, missing=None, getsizeof=None): - if getsizeof is None: - Cache.__init__(self, maxsize, missing=missing) - else: - Cache.__init__(self, maxsize, missing=missing, getsizeof=lambda e: getsizeof(e.value)) + def __init__(self, maxsize, ttl, timer=time.time, missing=None, + getsizeof=None): + if getsizeof is not None: + linksize = lambda link: getsizeof(link.value) + Cache.__init__(self, maxsize, missing=missing, getsizeof=linksize) self.getsizeof = getsizeof + else: + Cache.__init__(self, maxsize, missing=missing) self.__root = root = Link() root.ttl_prev = root.ttl_next = root root.lru_prev = root.lru_next = root diff --git a/tests/test_rrcache.py b/tests/test_rrcache.py index 5a40b7a..c35437c 100644 --- a/tests/test_rrcache.py +++ b/tests/test_rrcache.py @@ -7,8 +7,10 @@ from cachetools import RRCache, rr_cache class RRCacheTest(unittest.TestCase, CacheTestMixin, DecoratorTestMixin): - def cache(self, maxsize, choice=random.choice, missing=None, getsizeof=None): - return RRCache(maxsize, choice=choice, missing=missing, getsizeof=getsizeof) + def cache(self, maxsize, choice=random.choice, missing=None, + getsizeof=None): + return RRCache(maxsize, choice=choice, missing=missing, + getsizeof=getsizeof) def decorator(self, maxsize, choice=random.choice, typed=False, lock=None): return rr_cache(maxsize, choice=choice, typed=typed, lock=lock) diff --git a/tests/test_ttlcache.py b/tests/test_ttlcache.py index 35227b2..f881091 100644 --- a/tests/test_ttlcache.py +++ b/tests/test_ttlcache.py @@ -18,7 +18,8 @@ class Timer: class TTLCacheTest(unittest.TestCase, 
CacheTestMixin, DecoratorTestMixin): def cache(self, maxsize, ttl=0, missing=None, getsizeof=None): - return TTLCache(maxsize, ttl, timer=Timer(), missing=missing, getsizeof=getsizeof) + return TTLCache(maxsize, ttl, timer=Timer(), missing=missing, + getsizeof=getsizeof) def decorator(self, maxsize, ttl=0, typed=False, lock=None): return ttl_cache(maxsize, ttl, timer=Timer(), typed=typed, lock=lock) -- cgit v1.2.3 From bf8d13e57ee9dba4da2570a6a193eced9a0b05d7 Mon Sep 17 00:00:00 2001 From: Thomas Kemmer Date: Wed, 3 Dec 2014 18:50:47 +0100 Subject: Fix #28: Function arguments must be hashable for cache decorators. --- docs/index.rst | 19 ++++++++++--------- 1 file changed, 10 insertions(+), 9 deletions(-) diff --git a/docs/index.rst b/docs/index.rst index ddf68d7..6e4edf5 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -47,11 +47,11 @@ different cache algorithms. All these classes derive from class current size of the cache. All cache classes accept an optional `missing` keyword argument in -their constructor, which can be used to provide a default or factory +their constructor, which can be used to provide a default factory function. If a key `key` is not present, the ``cache[key]`` operation calls :meth:`Cache.__missing__`, which in turn calls `missing` with `key` as argument. The cache will then store the object returned from -`missing(key)` as the new cache value for `key`, possibly discarding +``missing(key)`` as the new cache value for `key`, possibly discarding other items if the cache is full. This may be used to easily provide caching for existing single-argument functions, for example:: @@ -119,9 +119,8 @@ of one argument used to retrieve the size of an item's value. 
Function Decorators ------------------------------------------------------------------------ -This module provides several memoizing function decorators compatible -with -- though not necessarily as efficient as -- the Python 3 -Standard Library :func:`functools.lru_cache` decorator:: +This module provides several memoizing function decorators similar to +the Python 3 Standard Library :func:`functools.lru_cache` decorator:: import cachetools import urllib.request @@ -140,8 +139,8 @@ Standard Library :func:`functools.lru_cache` decorator:: print(n, 'Not Found') print(get_pep.cache_info()) -In addition to a `maxsize` parameter, all decorators feature some -optional keyword arguments: +In addition to a `maxsize` parameter, all decorators provide the +following optional keyword arguments: - `typed`, if is set to :const:`True`, will cause function arguments of different types to be cached separately. @@ -161,8 +160,10 @@ The wrapped function is instrumented with :func:`cache_info` and performance and clear the cache. See the :func:`functools.lru_cache` documentation for details. -Note that unlike :func:`functools.lru_cache`, setting `maxsize` to -:const:`None` is not supported. +Like for :func:`functools.lru_cache`, the positional and keyword +arguments to the function must be hashable. Note that unlike +:func:`functools.lru_cache`, setting `maxsize` to :const:`None` is not +supported. .. decorator:: lfu_cache(maxsize=128, typed=False, getsizeof=None, lock=threading.RLock) -- cgit v1.2.3 From 35855b5a02a066a51d1eaae670fc5c0efcc3a926 Mon Sep 17 00:00:00 2001 From: Thomas Kemmer Date: Wed, 3 Dec 2014 18:54:11 +0100 Subject: Update CHANGES.rst --- CHANGES.rst | 2 ++ 1 file changed, 2 insertions(+) diff --git a/CHANGES.rst b/CHANGES.rst index becdfa6..5e65593 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -7,6 +7,8 @@ - Add ``Cache.__missing__()``. +- Feature freeze for `v1.0`. 
+ 0.7.1 2014-11-22 ---------------- -- cgit v1.2.3 From d7cb99468c24cc6f88c1a71687175c264771bc9b Mon Sep 17 00:00:00 2001 From: Thomas Kemmer Date: Sun, 7 Dec 2014 17:53:43 +0100 Subject: Prepare v0.8.1 --- CHANGES.rst | 6 ++ cachetools/__init__.py | 2 +- cachetools/cache.py | 3 +- cachetools/lfu.py | 2 +- cachetools/lru.py | 6 +- cachetools/rr.py | 2 +- cachetools/ttl.py | 15 ++--- docs/index.rst | 5 ++ tests/test_cache.py | 17 +++++ tests/test_lfu.py | 53 ++++++++++++++++ tests/test_lfucache.py | 53 ---------------- tests/test_lru.py | 60 ++++++++++++++++++ tests/test_lrucache.py | 60 ------------------ tests/test_rr.py | 40 ++++++++++++ tests/test_rrcache.py | 40 ------------ tests/test_ttl.py | 168 +++++++++++++++++++++++++++++++++++++++++++++++++ tests/test_ttlcache.py | 168 ------------------------------------------------- 17 files changed, 365 insertions(+), 335 deletions(-) create mode 100644 tests/test_lfu.py delete mode 100644 tests/test_lfucache.py create mode 100644 tests/test_lru.py delete mode 100644 tests/test_lrucache.py create mode 100644 tests/test_rr.py delete mode 100644 tests/test_rrcache.py create mode 100644 tests/test_ttl.py delete mode 100644 tests/test_ttlcache.py diff --git a/CHANGES.rst b/CHANGES.rst index 5e65593..06c80f7 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -1,3 +1,9 @@ +0.8.1 2014-12-07 +---------------- + +- Deprecate ``Cache.getsize()``. 
+ + 0.8.0 2014-12-03 ---------------- diff --git a/cachetools/__init__.py b/cachetools/__init__.py index 981209a..4a3c1ad 100644 --- a/cachetools/__init__.py +++ b/cachetools/__init__.py @@ -14,4 +14,4 @@ __all__ = ( 'lfu_cache', 'lru_cache', 'rr_cache', 'ttl_cache', ) -__version__ = '0.8.0' +__version__ = '0.8.1' diff --git a/cachetools/cache.py b/cachetools/cache.py index e26bd5b..779edcc 100644 --- a/cachetools/cache.py +++ b/cachetools/cache.py @@ -91,7 +91,8 @@ class Cache(collections.MutableMapping): return self.__currsize def getsize(self, key): - """Return the size of the cache item with key `key`.""" + import warnings + warnings.warn("Cache.getsize is deprecated", DeprecationWarning) return self.__data[key][1] def getsizeof(self, value): diff --git a/cachetools/lfu.py b/cachetools/lfu.py index b24094a..45b5b06 100644 --- a/cachetools/lfu.py +++ b/cachetools/lfu.py @@ -15,7 +15,7 @@ class LFUCache(Cache): """ def __init__(self, maxsize, missing=None, getsizeof=None): - Cache.__init__(self, maxsize, missing=missing, getsizeof=getsizeof) + Cache.__init__(self, maxsize, missing, getsizeof) self.__counter = collections.Counter() def __getitem__(self, key, cache_getitem=Cache.__getitem__): diff --git a/cachetools/lru.py b/cachetools/lru.py index f2d8ac7..2128908 100644 --- a/cachetools/lru.py +++ b/cachetools/lru.py @@ -24,11 +24,11 @@ class LRUCache(Cache): def __init__(self, maxsize, missing=None, getsizeof=None): if getsizeof is not None: - linksize = lambda link: getsizeof(link.value) - Cache.__init__(self, maxsize, missing=missing, getsizeof=linksize) + getlinksize = lambda link: getsizeof(link.value) + Cache.__init__(self, maxsize, missing, getlinksize) self.getsizeof = getsizeof else: - Cache.__init__(self, maxsize, missing=missing) + Cache.__init__(self, maxsize, missing) self.__root = root = Link() root.prev = root.next = root diff --git a/cachetools/rr.py b/cachetools/rr.py index 3d72bb0..00c6f34 100644 --- a/cachetools/rr.py +++ b/cachetools/rr.py @@ 
-20,7 +20,7 @@ class RRCache(Cache): def __init__(self, maxsize, choice=random.choice, missing=None, getsizeof=None): - Cache.__init__(self, maxsize, missing=missing, getsizeof=getsizeof) + Cache.__init__(self, maxsize, missing, getsizeof) self.__choice = choice def popitem(self): diff --git a/cachetools/ttl.py b/cachetools/ttl.py index a90af98..62f77b7 100644 --- a/cachetools/ttl.py +++ b/cachetools/ttl.py @@ -8,11 +8,14 @@ import time class Link(object): __slots__ = ( - 'key', 'value', 'expire', + 'key', 'value', 'expire', 'size', 'ttl_prev', 'ttl_next', 'lru_prev', 'lru_next' ) + def getsize(self): + return self.size + def unlink(self): ttl_next = self.ttl_next ttl_prev = self.ttl_prev @@ -42,16 +45,13 @@ class TTLCache(Cache): """ - ExpiredError = KeyError # deprecated - def __init__(self, maxsize, ttl, timer=time.time, missing=None, getsizeof=None): if getsizeof is not None: - linksize = lambda link: getsizeof(link.value) - Cache.__init__(self, maxsize, missing=missing, getsizeof=linksize) + Cache.__init__(self, maxsize, missing, Link.getsize) self.getsizeof = getsizeof else: - Cache.__init__(self, maxsize, missing=missing) + Cache.__init__(self, maxsize, missing) self.__root = root = Link() root.ttl_prev = root.ttl_next = root root.lru_prev = root.lru_next = root @@ -96,6 +96,7 @@ class TTLCache(Cache): link.key = key link.value = value link.expire = time + self.__ttl + link.size = self.getsizeof(value) cache_setitem(self, key, link) if oldlink: oldlink.unlink() @@ -182,7 +183,7 @@ class TTLCache(Cache): root = self.__root head = root.ttl_next while head is not root and head.expire < time: - expired += self.getsize(head.key) + expired += head.size head = head.ttl_next return super(TTLCache, self).currsize - expired diff --git a/docs/index.rst b/docs/index.rst index 6e4edf5..a5eae45 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -84,6 +84,11 @@ of one argument used to retrieve the size of an item's value. .. 
autoclass:: Cache :members: + :exclude-members: getsize + + .. method:: getsize + + .. deprecated:: 0.8.1 .. autoclass:: LFUCache :members: diff --git a/tests/test_cache.py b/tests/test_cache.py index 433a733..51536db 100644 --- a/tests/test_cache.py +++ b/tests/test_cache.py @@ -8,3 +8,20 @@ class CacheTest(unittest.TestCase, CacheTestMixin): def cache(self, maxsize, missing=None, getsizeof=None): return Cache(maxsize, missing=missing, getsizeof=getsizeof) + + def test_getsize(self): + # Cache.getsize is deprecated + cache = self.cache(maxsize=3, getsizeof=lambda x: x) + cache.update({1: 1, 2: 2}) + + import warnings + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter("always") + self.assertEqual(1, cache.getsize(1)) + self.assertEqual(1, len(w)) + self.assertEqual(w[0].category, DeprecationWarning) + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter("always") + self.assertEqual(2, cache.getsize(2)) + self.assertEqual(1, len(w)) + self.assertEqual(w[0].category, DeprecationWarning) diff --git a/tests/test_lfu.py b/tests/test_lfu.py new file mode 100644 index 0000000..3b69647 --- /dev/null +++ b/tests/test_lfu.py @@ -0,0 +1,53 @@ +import unittest + +from . 
import CacheTestMixin, DecoratorTestMixin +from cachetools import LFUCache, lfu_cache + + +class LFUCacheTest(unittest.TestCase, CacheTestMixin, DecoratorTestMixin): + + def cache(self, maxsize, missing=None, getsizeof=None): + return LFUCache(maxsize, missing=missing, getsizeof=getsizeof) + + def decorator(self, maxsize, typed=False, lock=None): + return lfu_cache(maxsize, typed=typed, lock=lock) + + def test_lfu(self): + cache = self.cache(maxsize=2) + + cache[1] = 1 + cache[1] + cache[2] = 2 + cache[3] = 3 + + self.assertEqual(len(cache), 2) + self.assertEqual(cache[1], 1) + self.assertTrue(2 in cache or 3 in cache) + self.assertTrue(2 not in cache or 3 not in cache) + + cache[4] = 4 + self.assertEqual(len(cache), 2) + self.assertEqual(cache[4], 4) + self.assertEqual(cache[1], 1) + + def test_lfu_getsizeof(self): + cache = self.cache(maxsize=3, getsizeof=lambda x: x) + + cache[1] = 1 + cache[2] = 2 + + self.assertEqual(len(cache), 2) + self.assertEqual(cache[1], 1) + self.assertEqual(cache[2], 2) + + cache[3] = 3 + + self.assertEqual(len(cache), 1) + self.assertEqual(cache[3], 3) + self.assertNotIn(1, cache) + self.assertNotIn(2, cache) + + with self.assertRaises(ValueError): + cache[4] = 4 + self.assertEqual(len(cache), 1) + self.assertEqual(cache[3], 3) diff --git a/tests/test_lfucache.py b/tests/test_lfucache.py deleted file mode 100644 index 3b69647..0000000 --- a/tests/test_lfucache.py +++ /dev/null @@ -1,53 +0,0 @@ -import unittest - -from . 
import CacheTestMixin, DecoratorTestMixin -from cachetools import LFUCache, lfu_cache - - -class LFUCacheTest(unittest.TestCase, CacheTestMixin, DecoratorTestMixin): - - def cache(self, maxsize, missing=None, getsizeof=None): - return LFUCache(maxsize, missing=missing, getsizeof=getsizeof) - - def decorator(self, maxsize, typed=False, lock=None): - return lfu_cache(maxsize, typed=typed, lock=lock) - - def test_lfu(self): - cache = self.cache(maxsize=2) - - cache[1] = 1 - cache[1] - cache[2] = 2 - cache[3] = 3 - - self.assertEqual(len(cache), 2) - self.assertEqual(cache[1], 1) - self.assertTrue(2 in cache or 3 in cache) - self.assertTrue(2 not in cache or 3 not in cache) - - cache[4] = 4 - self.assertEqual(len(cache), 2) - self.assertEqual(cache[4], 4) - self.assertEqual(cache[1], 1) - - def test_lfu_getsizeof(self): - cache = self.cache(maxsize=3, getsizeof=lambda x: x) - - cache[1] = 1 - cache[2] = 2 - - self.assertEqual(len(cache), 2) - self.assertEqual(cache[1], 1) - self.assertEqual(cache[2], 2) - - cache[3] = 3 - - self.assertEqual(len(cache), 1) - self.assertEqual(cache[3], 3) - self.assertNotIn(1, cache) - self.assertNotIn(2, cache) - - with self.assertRaises(ValueError): - cache[4] = 4 - self.assertEqual(len(cache), 1) - self.assertEqual(cache[3], 3) diff --git a/tests/test_lru.py b/tests/test_lru.py new file mode 100644 index 0000000..f21170d --- /dev/null +++ b/tests/test_lru.py @@ -0,0 +1,60 @@ +import unittest + +from . 
import CacheTestMixin, DecoratorTestMixin +from cachetools import LRUCache, lru_cache + + +class LRUCacheTest(unittest.TestCase, CacheTestMixin, DecoratorTestMixin): + + def cache(self, maxsize, missing=None, getsizeof=None): + return LRUCache(maxsize, missing=missing, getsizeof=getsizeof) + + def decorator(self, maxsize, typed=False, lock=None): + return lru_cache(maxsize, typed=typed, lock=lock) + + def test_lru(self): + cache = self.cache(maxsize=2) + + cache[1] = 1 + cache[2] = 2 + cache[3] = 3 + + self.assertEqual(len(cache), 2) + self.assertEqual(cache[2], 2) + self.assertEqual(cache[3], 3) + self.assertNotIn(1, cache) + + cache[2] + cache[4] = 4 + self.assertEqual(len(cache), 2) + self.assertEqual(cache[2], 2) + self.assertEqual(cache[4], 4) + self.assertNotIn(3, cache) + + cache[5] = 5 + self.assertEqual(len(cache), 2) + self.assertEqual(cache[4], 4) + self.assertEqual(cache[5], 5) + self.assertNotIn(2, cache) + + def test_lru_getsizeof(self): + cache = self.cache(maxsize=3, getsizeof=lambda x: x) + + cache[1] = 1 + cache[2] = 2 + + self.assertEqual(len(cache), 2) + self.assertEqual(cache[1], 1) + self.assertEqual(cache[2], 2) + + cache[3] = 3 + + self.assertEqual(len(cache), 1) + self.assertEqual(cache[3], 3) + self.assertNotIn(1, cache) + self.assertNotIn(2, cache) + + with self.assertRaises(ValueError): + cache[4] = 4 + self.assertEqual(len(cache), 1) + self.assertEqual(cache[3], 3) diff --git a/tests/test_lrucache.py b/tests/test_lrucache.py deleted file mode 100644 index f21170d..0000000 --- a/tests/test_lrucache.py +++ /dev/null @@ -1,60 +0,0 @@ -import unittest - -from . 
import CacheTestMixin, DecoratorTestMixin -from cachetools import LRUCache, lru_cache - - -class LRUCacheTest(unittest.TestCase, CacheTestMixin, DecoratorTestMixin): - - def cache(self, maxsize, missing=None, getsizeof=None): - return LRUCache(maxsize, missing=missing, getsizeof=getsizeof) - - def decorator(self, maxsize, typed=False, lock=None): - return lru_cache(maxsize, typed=typed, lock=lock) - - def test_lru(self): - cache = self.cache(maxsize=2) - - cache[1] = 1 - cache[2] = 2 - cache[3] = 3 - - self.assertEqual(len(cache), 2) - self.assertEqual(cache[2], 2) - self.assertEqual(cache[3], 3) - self.assertNotIn(1, cache) - - cache[2] - cache[4] = 4 - self.assertEqual(len(cache), 2) - self.assertEqual(cache[2], 2) - self.assertEqual(cache[4], 4) - self.assertNotIn(3, cache) - - cache[5] = 5 - self.assertEqual(len(cache), 2) - self.assertEqual(cache[4], 4) - self.assertEqual(cache[5], 5) - self.assertNotIn(2, cache) - - def test_lru_getsizeof(self): - cache = self.cache(maxsize=3, getsizeof=lambda x: x) - - cache[1] = 1 - cache[2] = 2 - - self.assertEqual(len(cache), 2) - self.assertEqual(cache[1], 1) - self.assertEqual(cache[2], 2) - - cache[3] = 3 - - self.assertEqual(len(cache), 1) - self.assertEqual(cache[3], 3) - self.assertNotIn(1, cache) - self.assertNotIn(2, cache) - - with self.assertRaises(ValueError): - cache[4] = 4 - self.assertEqual(len(cache), 1) - self.assertEqual(cache[3], 3) diff --git a/tests/test_rr.py b/tests/test_rr.py new file mode 100644 index 0000000..c35437c --- /dev/null +++ b/tests/test_rr.py @@ -0,0 +1,40 @@ +import unittest +import random + +from . 
import CacheTestMixin, DecoratorTestMixin +from cachetools import RRCache, rr_cache + + +class RRCacheTest(unittest.TestCase, CacheTestMixin, DecoratorTestMixin): + + def cache(self, maxsize, choice=random.choice, missing=None, + getsizeof=None): + return RRCache(maxsize, choice=choice, missing=missing, + getsizeof=getsizeof) + + def decorator(self, maxsize, choice=random.choice, typed=False, lock=None): + return rr_cache(maxsize, choice=choice, typed=typed, lock=lock) + + def test_choice(self): + cache = self.cache(maxsize=2, choice=min) + + cache[1] = 1 + cache[2] = 2 + cache[3] = 3 + + self.assertEqual(2, len(cache)) + self.assertEqual(2, cache[2]) + self.assertEqual(3, cache[3]) + self.assertNotIn(1, cache) + + cache[0] = 0 + self.assertEqual(2, len(cache)) + self.assertEqual(0, cache[0]) + self.assertEqual(3, cache[3]) + self.assertNotIn(2, cache) + + cache[4] = 4 + self.assertEqual(2, len(cache)) + self.assertEqual(3, cache[3]) + self.assertEqual(4, cache[4]) + self.assertNotIn(0, cache) diff --git a/tests/test_rrcache.py b/tests/test_rrcache.py deleted file mode 100644 index c35437c..0000000 --- a/tests/test_rrcache.py +++ /dev/null @@ -1,40 +0,0 @@ -import unittest -import random - -from . 
import CacheTestMixin, DecoratorTestMixin -from cachetools import RRCache, rr_cache - - -class RRCacheTest(unittest.TestCase, CacheTestMixin, DecoratorTestMixin): - - def cache(self, maxsize, choice=random.choice, missing=None, - getsizeof=None): - return RRCache(maxsize, choice=choice, missing=missing, - getsizeof=getsizeof) - - def decorator(self, maxsize, choice=random.choice, typed=False, lock=None): - return rr_cache(maxsize, choice=choice, typed=typed, lock=lock) - - def test_choice(self): - cache = self.cache(maxsize=2, choice=min) - - cache[1] = 1 - cache[2] = 2 - cache[3] = 3 - - self.assertEqual(2, len(cache)) - self.assertEqual(2, cache[2]) - self.assertEqual(3, cache[3]) - self.assertNotIn(1, cache) - - cache[0] = 0 - self.assertEqual(2, len(cache)) - self.assertEqual(0, cache[0]) - self.assertEqual(3, cache[3]) - self.assertNotIn(2, cache) - - cache[4] = 4 - self.assertEqual(2, len(cache)) - self.assertEqual(3, cache[3]) - self.assertEqual(4, cache[4]) - self.assertNotIn(0, cache) diff --git a/tests/test_ttl.py b/tests/test_ttl.py new file mode 100644 index 0000000..f881091 --- /dev/null +++ b/tests/test_ttl.py @@ -0,0 +1,168 @@ +import unittest + +from . 
import CacheTestMixin, DecoratorTestMixin +from cachetools import TTLCache, ttl_cache + + +class Timer: + def __init__(self): + self.__time = 0 + + def __call__(self): + return self.__time + + def tick(self): + self.__time += 1 + + +class TTLCacheTest(unittest.TestCase, CacheTestMixin, DecoratorTestMixin): + + def cache(self, maxsize, ttl=0, missing=None, getsizeof=None): + return TTLCache(maxsize, ttl, timer=Timer(), missing=missing, + getsizeof=getsizeof) + + def decorator(self, maxsize, ttl=0, typed=False, lock=None): + return ttl_cache(maxsize, ttl, timer=Timer(), typed=typed, lock=lock) + + def test_lru(self): + cache = self.cache(maxsize=2) + + cache[1] = 1 + cache[2] = 2 + cache[3] = 3 + + self.assertEqual(len(cache), 2) + self.assertNotIn(1, cache) + self.assertEqual(cache[2], 2) + self.assertEqual(cache[3], 3) + + cache[2] + cache[4] = 4 + self.assertEqual(len(cache), 2) + self.assertNotIn(1, cache) + self.assertEqual(cache[2], 2) + self.assertNotIn(3, cache) + self.assertEqual(cache[4], 4) + + cache[5] = 5 + self.assertEqual(len(cache), 2) + self.assertNotIn(1, cache) + self.assertNotIn(2, cache) + self.assertNotIn(3, cache) + self.assertEqual(cache[4], 4) + self.assertEqual(cache[5], 5) + + def test_ttl(self): + cache = self.cache(maxsize=2, ttl=1) + self.assertEqual(1, cache.ttl) + + cache[1] = 1 + self.assertEqual({1}, set(cache)) + self.assertEqual(1, len(cache)) + self.assertEqual(1, cache.currsize) + self.assertEqual(1, cache[1]) + + cache.timer.tick() + self.assertEqual({1}, set(cache)) + self.assertEqual(1, len(cache)) + self.assertEqual(1, cache.currsize) + self.assertEqual(1, cache[1]) + + cache[2] = 2 + self.assertEqual({1, 2}, set(cache)) + self.assertEqual(2, len(cache)) + self.assertEqual(2, cache.currsize) + self.assertEqual(1, cache[1]) + self.assertEqual(2, cache[2]) + + cache.timer.tick() + self.assertEqual({2}, set(cache)) + self.assertEqual(1, len(cache)) + self.assertEqual(1, cache.currsize) + self.assertNotIn(1, cache) + 
self.assertEqual(2, cache[2]) + + cache[3] = 3 + self.assertEqual({2, 3}, set(cache)) + self.assertEqual(2, len(cache)) + self.assertEqual(2, cache.currsize) + self.assertNotIn(1, cache) + self.assertEqual(2, cache[2]) + self.assertEqual(3, cache[3]) + + cache.timer.tick() + self.assertEqual({3}, set(cache)) + self.assertEqual(1, len(cache)) + self.assertEqual(1, cache.currsize) + self.assertNotIn(1, cache) + self.assertNotIn(2, cache) + self.assertEqual(3, cache[3]) + + cache.timer.tick() + self.assertEqual(set(), set(cache)) + self.assertEqual(0, len(cache)) + self.assertEqual(0, cache.currsize) + self.assertNotIn(1, cache) + self.assertNotIn(2, cache) + self.assertNotIn(3, cache) + + def test_expire(self): + cache = self.cache(maxsize=3, ttl=2) + self.assertEqual(2, cache.ttl) + + cache[1] = 1 + cache.timer.tick() + cache[2] = 2 + cache.timer.tick() + cache[3] = 3 + self.assertEqual(2, cache.timer()) + + self.assertEqual({1, 2, 3}, set(cache)) + self.assertEqual(3, len(cache)) + self.assertEqual(3, cache.currsize) + self.assertEqual(1, cache[1]) + self.assertEqual(2, cache[2]) + self.assertEqual(3, cache[3]) + + cache.expire() + self.assertEqual({1, 2, 3}, set(cache)) + self.assertEqual(3, len(cache)) + self.assertEqual(3, cache.currsize) + self.assertEqual(1, cache[1]) + self.assertEqual(2, cache[2]) + self.assertEqual(3, cache[3]) + + cache.expire(3) + self.assertEqual({2, 3}, set(cache)) + self.assertEqual(2, len(cache)) + self.assertEqual(2, cache.currsize) + self.assertNotIn(1, cache) + self.assertEqual(2, cache[2]) + self.assertEqual(3, cache[3]) + + cache.expire(4) + self.assertEqual({3}, set(cache)) + self.assertEqual(1, len(cache)) + self.assertEqual(1, cache.currsize) + self.assertNotIn(1, cache) + self.assertNotIn(2, cache) + self.assertEqual(3, cache[3]) + + cache.expire(5) + self.assertEqual(set(), set(cache)) + self.assertEqual(0, len(cache)) + self.assertEqual(0, cache.currsize) + self.assertNotIn(1, cache) + self.assertNotIn(2, cache) + 
self.assertNotIn(3, cache) + + def test_tuple_key(self): + cache = self.cache(maxsize=1, ttl=0) + self.assertEqual(0, cache.ttl) + + cache[(1, 2, 3)] = 42 + self.assertEqual(42, cache[(1, 2, 3)]) + cache.timer.tick() + with self.assertRaises(KeyError): + cache[(1, 2, 3)] + self.assertNotIn((1, 2, 3), cache) diff --git a/tests/test_ttlcache.py b/tests/test_ttlcache.py deleted file mode 100644 index f881091..0000000 --- a/tests/test_ttlcache.py +++ /dev/null @@ -1,168 +0,0 @@ -import unittest - -from . import CacheTestMixin, DecoratorTestMixin -from cachetools import TTLCache, ttl_cache - - -class Timer: - def __init__(self): - self.__time = 0 - - def __call__(self): - return self.__time - - def tick(self): - self.__time += 1 - - -class TTLCacheTest(unittest.TestCase, CacheTestMixin, DecoratorTestMixin): - - def cache(self, maxsize, ttl=0, missing=None, getsizeof=None): - return TTLCache(maxsize, ttl, timer=Timer(), missing=missing, - getsizeof=getsizeof) - - def decorator(self, maxsize, ttl=0, typed=False, lock=None): - return ttl_cache(maxsize, ttl, timer=Timer(), typed=typed, lock=lock) - - def test_lru(self): - cache = self.cache(maxsize=2) - - cache[1] = 1 - cache[2] = 2 - cache[3] = 3 - - self.assertEqual(len(cache), 2) - self.assertNotIn(1, cache) - self.assertEqual(cache[2], 2) - self.assertEqual(cache[3], 3) - - cache[2] - cache[4] = 4 - self.assertEqual(len(cache), 2) - self.assertNotIn(1, cache) - self.assertEqual(cache[2], 2) - self.assertNotIn(3, cache) - self.assertEqual(cache[4], 4) - - cache[5] = 5 - self.assertEqual(len(cache), 2) - self.assertNotIn(1, cache) - self.assertNotIn(2, cache) - self.assertNotIn(3, cache) - self.assertEqual(cache[4], 4) - self.assertEqual(cache[5], 5) - - def test_ttl(self): - cache = self.cache(maxsize=2, ttl=1) - self.assertEqual(1, cache.ttl) - - cache[1] = 1 - self.assertEqual({1}, set(cache)) - self.assertEqual(1, len(cache)) - self.assertEqual(1, cache.currsize) - self.assertEqual(1, cache[1]) - - cache.timer.tick() 
- self.assertEqual({1}, set(cache)) - self.assertEqual(1, len(cache)) - self.assertEqual(1, cache.currsize) - self.assertEqual(1, cache[1]) - - cache[2] = 2 - self.assertEqual({1, 2}, set(cache)) - self.assertEqual(2, len(cache)) - self.assertEqual(2, cache.currsize) - self.assertEqual(1, cache[1]) - self.assertEqual(2, cache[2]) - - cache.timer.tick() - self.assertEqual({2}, set(cache)) - self.assertEqual(1, len(cache)) - self.assertEqual(1, cache.currsize) - self.assertNotIn(1, cache) - self.assertEqual(2, cache[2]) - - cache[3] = 3 - self.assertEqual({2, 3}, set(cache)) - self.assertEqual(2, len(cache)) - self.assertEqual(2, cache.currsize) - self.assertNotIn(1, cache) - self.assertEqual(2, cache[2]) - self.assertEqual(3, cache[3]) - - cache.timer.tick() - self.assertEqual({3}, set(cache)) - self.assertEqual(1, len(cache)) - self.assertEqual(1, cache.currsize) - self.assertNotIn(1, cache) - self.assertNotIn(2, cache) - self.assertEqual(3, cache[3]) - - cache.timer.tick() - self.assertEqual(set(), set(cache)) - self.assertEqual(0, len(cache)) - self.assertEqual(0, cache.currsize) - self.assertNotIn(1, cache) - self.assertNotIn(2, cache) - self.assertNotIn(3, cache) - - def test_expire(self): - cache = self.cache(maxsize=3, ttl=2) - self.assertEqual(2, cache.ttl) - - cache[1] = 1 - cache.timer.tick() - cache[2] = 2 - cache.timer.tick() - cache[3] = 3 - self.assertEqual(2, cache.timer()) - - self.assertEqual({1, 2, 3}, set(cache)) - self.assertEqual(3, len(cache)) - self.assertEqual(3, cache.currsize) - self.assertEqual(1, cache[1]) - self.assertEqual(2, cache[2]) - self.assertEqual(3, cache[3]) - - cache.expire() - self.assertEqual({1, 2, 3}, set(cache)) - self.assertEqual(3, len(cache)) - self.assertEqual(3, cache.currsize) - self.assertEqual(1, cache[1]) - self.assertEqual(2, cache[2]) - self.assertEqual(3, cache[3]) - - cache.expire(3) - self.assertEqual({2, 3}, set(cache)) - self.assertEqual(2, len(cache)) - self.assertEqual(2, cache.currsize) - 
self.assertNotIn(1, cache) - self.assertEqual(2, cache[2]) - self.assertEqual(3, cache[3]) - - cache.expire(4) - self.assertEqual({3}, set(cache)) - self.assertEqual(1, len(cache)) - self.assertEqual(1, cache.currsize) - self.assertNotIn(1, cache) - self.assertNotIn(2, cache) - self.assertEqual(3, cache[3]) - - cache.expire(5) - self.assertEqual(set(), set(cache)) - self.assertEqual(0, len(cache)) - self.assertEqual(0, cache.currsize) - self.assertNotIn(1, cache) - self.assertNotIn(2, cache) - self.assertNotIn(3, cache) - - def test_tuple_key(self): - cache = self.cache(maxsize=1, ttl=0) - self.assertEqual(0, cache.ttl) - - cache[(1, 2, 3)] = 42 - self.assertEqual(42, cache[(1, 2, 3)]) - cache.timer.tick() - with self.assertRaises(KeyError): - cache[(1, 2, 3)] - self.assertNotIn((1, 2, 3), cache) -- cgit v1.2.3 From b624bf83a2930164b74630c8141b8eb1bb659b1a Mon Sep 17 00:00:00 2001 From: Thomas Kemmer Date: Wed, 10 Dec 2014 06:50:01 +0100 Subject: Use NestedTimer for TTLCache. --- CHANGES.rst | 6 ++ cachetools/__init__.py | 2 +- cachetools/ttl.py | 173 +++++++++++++++++++++++++++++++------------------ docs/index.rst | 18 ----- tests/test_ttl.py | 27 ++++++-- 5 files changed, 139 insertions(+), 87 deletions(-) diff --git a/CHANGES.rst b/CHANGES.rst index 06c80f7..1536736 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -1,3 +1,9 @@ +0.8.2 2014-12-15 +---------------- + +- Use a ``NestedTimer`` for ``TTLCache``. 
+ + 0.8.1 2014-12-07 ---------------- diff --git a/cachetools/__init__.py b/cachetools/__init__.py index 4a3c1ad..3cead10 100644 --- a/cachetools/__init__.py +++ b/cachetools/__init__.py @@ -14,4 +14,4 @@ __all__ = ( 'lfu_cache', 'lru_cache', 'rr_cache', 'ttl_cache', ) -__version__ = '0.8.1' +__version__ = '0.8.2' diff --git a/cachetools/ttl.py b/cachetools/ttl.py index 62f77b7..2883d22 100644 --- a/cachetools/ttl.py +++ b/cachetools/ttl.py @@ -2,6 +2,7 @@ from .cache import Cache from .decorators import cachedfunc from .lock import RLock +import functools import time @@ -28,6 +29,32 @@ class Link(object): lru_next.lru_prev = lru_prev +class NestedTimer(object): + + def __init__(self, timer): + self.__timer = timer + self.__nesting = 0 + + def __enter__(self): + if self.__nesting == 0: + self.__time = self.__timer() + self.__nesting += 1 + return self.__time + + def __exit__(self, *exc): + self.__nesting -= 1 + + def __call__(self): + if self.__nesting == 0: + return self.__timer() + else: + return self.__time + + def __getattr__(self, name): + # FIXME: for unittests timer.tick() + return getattr(self.__timer, name) + + class TTLCache(Cache): """LRU Cache implementation with per-item time-to-live (TTL) value. 
@@ -52,10 +79,10 @@ class TTLCache(Cache): self.getsizeof = getsizeof else: Cache.__init__(self, maxsize, missing) + self.__timer = NestedTimer(timer) self.__root = root = Link() root.ttl_prev = root.ttl_next = root root.lru_prev = root.lru_next = root - self.__timer = timer self.__ttl = ttl def __repr__(self, cache_getitem=Cache.__getitem__): @@ -70,81 +97,85 @@ class TTLCache(Cache): def __getitem__(self, key, cache_getitem=Cache.__getitem__, cache_missing=Cache.__missing__): - link = cache_getitem(self, key) - if link.expire < self.__timer(): - return cache_missing(self, key).value - next = link.lru_next - prev = link.lru_prev - prev.lru_next = next - next.lru_prev = prev - link.lru_next = root = self.__root - link.lru_prev = tail = root.lru_prev - tail.lru_next = root.lru_prev = link - return link.value + with self.__timer as time: + link = cache_getitem(self, key) + if link.expire < time: + return cache_missing(self, key).value + next = link.lru_next + prev = link.lru_prev + prev.lru_next = next + next.lru_prev = prev + link.lru_next = root = self.__root + link.lru_prev = tail = root.lru_prev + tail.lru_next = root.lru_prev = link + return link.value def __setitem__(self, key, value, cache_contains=Cache.__contains__, cache_getitem=Cache.__getitem__, cache_setitem=Cache.__setitem__): - time = self.__timer() - self.expire(time) - if cache_contains(self, key): - oldlink = cache_getitem(self, key) - else: - oldlink = None - link = Link() - link.key = key - link.value = value - link.expire = time + self.__ttl - link.size = self.getsizeof(value) - cache_setitem(self, key, link) - if oldlink: - oldlink.unlink() - link.ttl_next = root = self.__root - link.ttl_prev = tail = root.ttl_prev - tail.ttl_next = root.ttl_prev = link - link.lru_next = root - link.lru_prev = tail = root.lru_prev - tail.lru_next = root.lru_prev = link + with self.__timer as time: + self.expire(time) + if cache_contains(self, key): + oldlink = cache_getitem(self, key) + else: + oldlink = None + 
link = Link() + link.key = key + link.value = value + link.expire = time + self.__ttl + link.size = self.getsizeof(value) + cache_setitem(self, key, link) + if oldlink: + oldlink.unlink() + link.ttl_next = root = self.__root + link.ttl_prev = tail = root.ttl_prev + tail.ttl_next = root.ttl_prev = link + link.lru_next = root + link.lru_prev = tail = root.lru_prev + tail.lru_next = root.lru_prev = link def __delitem__(self, key, cache_contains=Cache.__contains__, cache_getitem=Cache.__getitem__, cache_delitem=Cache.__delitem__): - if not cache_contains(self, key): - raise KeyError(key) - link = cache_getitem(self, key) - cache_delitem(self, key) - link.unlink() - self.expire() + with self.__timer as time: + self.expire(time) + if not cache_contains(self, key): + raise KeyError(key) + link = cache_getitem(self, key) + cache_delitem(self, key) + link.unlink() def __contains__(self, key, cache_contains=Cache.__contains__, cache_getitem=Cache.__getitem__): - if not cache_contains(self, key): - return False - elif cache_getitem(self, key).expire < self.__timer(): - return False - else: - return True + with self.__timer as time: + if not cache_contains(self, key): + return False + elif cache_getitem(self, key).expire < time: + return False + else: + return True def __iter__(self): timer = self.__timer root = self.__root curr = root.ttl_next while curr is not root: - if not (curr.expire < timer()): - yield curr.key + with timer as time: + if not (curr.expire < time): + yield curr.key curr = curr.ttl_next def __len__(self, cache_len=Cache.__len__): - expired = 0 - time = self.__timer() root = self.__root head = root.ttl_next - while head is not root and head.expire < time: - expired += 1 - head = head.ttl_next + expired = 0 + with self.__timer as time: + while head is not root and head.expire < time: + expired += 1 + head = head.ttl_next return cache_len(self) - expired def expire(self, time=None): @@ -167,24 +198,26 @@ class TTLCache(Cache): def popitem(self): """Remove and 
return the `(key, value)` pair least recently used.""" - root = self.__root - link = root.lru_next - if link is root: - raise KeyError('cache is empty') - key = link.key - Cache.__delitem__(self, key) - link.unlink() - return (key, link.value) + with self.__timer as time: + self.expire(time) + root = self.__root + link = root.lru_next + if link is root: + raise KeyError('cache is empty') + key = link.key + Cache.__delitem__(self, key) + link.unlink() + return (key, link.value) @property def currsize(self): - expired = 0 - time = self.__timer() root = self.__root head = root.ttl_next - while head is not root and head.expire < time: - expired += head.size - head = head.ttl_next + expired = 0 + with self.__timer as time: + while head is not root and head.expire < time: + expired += head.size + head = head.ttl_next return super(TTLCache, self).currsize - expired @property @@ -197,6 +230,18 @@ class TTLCache(Cache): """Return the time-to-live of the cache.""" return self.__ttl + # mixin methods + + def __nested(method): + def wrapper(self, *args, **kwargs): + with self.__timer: + return method(self, *args, **kwargs) + return functools.update_wrapper(wrapper, method) + + get = __nested(Cache.get) + pop = __nested(Cache.pop) + setdefault = __nested(Cache.setdefault) + def ttl_cache(maxsize=128, ttl=600, timer=time.time, typed=False, getsizeof=None, lock=RLock): diff --git a/docs/index.rst b/docs/index.rst index a5eae45..49dc379 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -101,24 +101,6 @@ of one argument used to retrieve the size of an item's value. .. 
autoclass:: TTLCache :members: - :exclude-members: ExpiredError - - Note that a cache item may expire at *any* time, so iterating over - the items of a :class:`TTLCache` may raise :exc:`KeyError` - unexpectedly:: - - from cachetools import TTLCache - import time - - cache = TTLCache(maxsize=100, ttl=1.0) - cache.update({1: 1, 2: 2, 3: 3}) - - for key in cache: - time.sleep(0.5) - try: - print(cache[key]) - except KeyError: - print('Key %r has expired' % key) Function Decorators diff --git a/tests/test_ttl.py b/tests/test_ttl.py index f881091..db9efe2 100644 --- a/tests/test_ttl.py +++ b/tests/test_ttl.py @@ -5,14 +5,17 @@ from cachetools import TTLCache, ttl_cache class Timer: - def __init__(self): - self.__time = 0 + def __init__(self, auto=False): + self.auto = auto + self.time = 0 def __call__(self): - return self.__time + if self.auto: + self.time += 1 + return self.time def tick(self): - self.__time += 1 + self.time += 1 class TTLCacheTest(unittest.TestCase, CacheTestMixin, DecoratorTestMixin): @@ -106,6 +109,11 @@ class TTLCacheTest(unittest.TestCase, CacheTestMixin, DecoratorTestMixin): self.assertNotIn(2, cache) self.assertNotIn(3, cache) + with self.assertRaises(KeyError): + del cache[1] + with self.assertRaises(KeyError): + cache.pop(2) + def test_expire(self): cache = self.cache(maxsize=3, ttl=2) self.assertEqual(2, cache.ttl) @@ -156,6 +164,17 @@ class TTLCacheTest(unittest.TestCase, CacheTestMixin, DecoratorTestMixin): self.assertNotIn(2, cache) self.assertNotIn(3, cache) + def test_atomic(self): + cache = TTLCache(maxsize=1, ttl=1, timer=Timer(auto=True)) + cache[1] = 1 + self.assertEqual(1, cache[1]) + cache[1] = 1 + self.assertEqual(1, cache.get(1)) + cache[1] = 1 + self.assertEqual(1, cache.pop(1)) + cache[1] = 1 + self.assertEqual(1, cache.setdefault(1)) + def test_tuple_key(self): cache = self.cache(maxsize=1, ttl=0) self.assertEqual(0, cache.ttl) -- cgit v1.2.3 From f3437a1226f630f011097692b3ee3a3e4e64a1d7 Mon Sep 17 00:00:00 2001 From: Thomas 
Kemmer Date: Fri, 19 Dec 2014 15:56:11 +0100 Subject: Prepare v1.0.0 --- CHANGES.rst | 4 ++++ cachetools/__init__.py | 2 +- setup.py | 4 ++-- 3 files changed, 7 insertions(+), 3 deletions(-) diff --git a/CHANGES.rst b/CHANGES.rst index 1536736..feb677b 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -1,3 +1,7 @@ +1.0.0 UNRELEASED +---------------- + + 0.8.2 2014-12-15 ---------------- diff --git a/cachetools/__init__.py b/cachetools/__init__.py index 3cead10..6093731 100644 --- a/cachetools/__init__.py +++ b/cachetools/__init__.py @@ -14,4 +14,4 @@ __all__ = ( 'lfu_cache', 'lru_cache', 'rr_cache', 'ttl_cache', ) -__version__ = '0.8.2' +__version__ = '1.0.0' diff --git a/setup.py b/setup.py index 22ce031..633bcf8 100644 --- a/setup.py +++ b/setup.py @@ -17,9 +17,9 @@ setup( license='MIT', description='Extensible memoizing collections and decorators', long_description=open('README.rst').read(), - keywords='cache caching LRU LFU TTL', + keywords='cache caching memoizing memoization LRU LFU TTL', classifiers=[ - 'Development Status :: 4 - Beta', + 'Development Status :: 5 - Production/Stable', 'Environment :: Other Environment', 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', -- cgit v1.2.3 From 5c2a69ba94a59754da84a2659c0927c8a6cea694 Mon Sep 17 00:00:00 2001 From: Thomas Kemmer Date: Fri, 19 Dec 2014 15:59:10 +0100 Subject: Fix #30: Provide RRCache.choice property. --- CHANGES.rst | 2 ++ cachetools/rr.py | 5 +++++ tests/test_rr.py | 1 + 3 files changed, 8 insertions(+) diff --git a/CHANGES.rst b/CHANGES.rst index feb677b..246f373 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -1,6 +1,8 @@ 1.0.0 UNRELEASED ---------------- +- Provide ``RRCache.choice`` property. 
+ 0.8.2 2014-12-15 ---------------- diff --git a/cachetools/rr.py b/cachetools/rr.py index 00c6f34..014caec 100644 --- a/cachetools/rr.py +++ b/cachetools/rr.py @@ -31,6 +31,11 @@ class RRCache(Cache): raise KeyError('cache is empty') return (key, self.pop(key)) + @property + def choice(self): + """Return the `choice` function used by the cache.""" + return self.__choice + def rr_cache(maxsize=128, choice=random.choice, typed=False, getsizeof=None, lock=RLock): diff --git a/tests/test_rr.py b/tests/test_rr.py index c35437c..7af9863 100644 --- a/tests/test_rr.py +++ b/tests/test_rr.py @@ -17,6 +17,7 @@ class RRCacheTest(unittest.TestCase, CacheTestMixin, DecoratorTestMixin): def test_choice(self): cache = self.cache(maxsize=2, choice=min) + self.assertEqual(min, cache.choice) cache[1] = 1 cache[2] = 2 -- cgit v1.2.3 From c6137f2fbc28ef166975ae20c440e7b540444be7 Mon Sep 17 00:00:00 2001 From: Thomas Kemmer Date: Fri, 19 Dec 2014 16:22:07 +0100 Subject: Fix #3: Improve documentation. --- CHANGES.rst | 2 ++ cachetools/cache.py | 26 +++------------ cachetools/lfu.py | 13 +++----- cachetools/lru.py | 7 +--- cachetools/rr.py | 18 +++------- cachetools/ttl.py | 38 +++++++--------------- docs/index.rst | 94 +++++++++++++++++++++++++++++++++++++++-------------- tests/test_cache.py | 17 ---------- tests/test_ttl.py | 3 ++ 9 files changed, 99 insertions(+), 119 deletions(-) diff --git a/CHANGES.rst b/CHANGES.rst index 246f373..50940eb 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -3,6 +3,8 @@ - Provide ``RRCache.choice`` property. +- Improve documentation. + 0.8.2 2014-12-15 ---------------- diff --git a/cachetools/cache.py b/cachetools/cache.py index 779edcc..0c692f9 100644 --- a/cachetools/cache.py +++ b/cachetools/cache.py @@ -6,20 +6,7 @@ def one(value): class Cache(collections.MutableMapping): - """Mutable mapping to serve as a simple cache or cache base class. - - This class discards arbitrary items using :meth:`popitem` to make - space when necessary. 
Derived classes may override - :meth:`popitem` to implement specific caching strategies. If a - subclass has to keep track of item access, insertion or deletion, - it may additionally need to override :meth:`__getitem__`, - :meth:`__setitem__` and :meth:`__delitem__`. If a subclass has to - keep meta data with its values, i.e. the `value` argument passed - to :meth:`Cache.__setitem__` is different from what a user would - regard as the cache's value, it will probably want to override - :meth:`getsizeof`, too. - - """ + """Mutable mapping to serve as a simple cache or cache base class.""" def __init__(self, maxsize, missing=None, getsizeof=None): self.__data = dict() @@ -82,21 +69,16 @@ class Cache(collections.MutableMapping): @property def maxsize(self): - """Return the maximum size of the cache.""" + """The maximum size of the cache.""" return self.__maxsize @property def currsize(self): - """Return the current size of the cache.""" + """The current size of the cache.""" return self.__currsize - def getsize(self, key): - import warnings - warnings.warn("Cache.getsize is deprecated", DeprecationWarning) - return self.__data[key][1] - def getsizeof(self, value): - """Return the size of a cache element.""" + """Return the size of a cache element's value.""" return self.__getsizeof(value) # collections.MutableMapping mixin methods do not handle __missing__ diff --git a/cachetools/lfu.py b/cachetools/lfu.py index 45b5b06..0afa9c9 100644 --- a/cachetools/lfu.py +++ b/cachetools/lfu.py @@ -1,18 +1,13 @@ +import collections +import operator + from .cache import Cache from .decorators import cachedfunc from .lock import RLock -import collections -import operator - class LFUCache(Cache): - """Least Frequently Used (LFU) cache implementation. - - This class counts how often an item is retrieved, and discards the - items used least often to make space when necessary. 
- - """ + """Least Frequently Used (LFU) cache implementation.""" def __init__(self, maxsize, missing=None, getsizeof=None): Cache.__init__(self, maxsize, missing, getsizeof) diff --git a/cachetools/lru.py b/cachetools/lru.py index 2128908..dbc9786 100644 --- a/cachetools/lru.py +++ b/cachetools/lru.py @@ -15,12 +15,7 @@ class Link(object): class LRUCache(Cache): - """Least Recently Used (LRU) cache implementation. - - This class discards the least recently used items first to make - space when necessary. - - """ + """Least Recently Used (LRU) cache implementation.""" def __init__(self, maxsize, missing=None, getsizeof=None): if getsizeof is not None: diff --git a/cachetools/rr.py b/cachetools/rr.py index 014caec..5119c48 100644 --- a/cachetools/rr.py +++ b/cachetools/rr.py @@ -1,22 +1,12 @@ +import random + from .cache import Cache from .decorators import cachedfunc from .lock import RLock -import random - class RRCache(Cache): - """Random Replacement (RR) cache implementation. - - This class randomly selects candidate items and discards them to - make space when necessary. - - By default, items are selected from the list of cache keys using - :func:`random.choice`. The optional argument `choice` may specify - an alternative function that returns an arbitrary element from a - non-empty sequence. 
- - """ + """Random Replacement (RR) cache implementation.""" def __init__(self, maxsize, choice=random.choice, missing=None, getsizeof=None): @@ -33,7 +23,7 @@ class RRCache(Cache): @property def choice(self): - """Return the `choice` function used by the cache.""" + """The `choice` function used by the cache.""" return self.__choice diff --git a/cachetools/ttl.py b/cachetools/ttl.py index 2883d22..db35aa3 100644 --- a/cachetools/ttl.py +++ b/cachetools/ttl.py @@ -1,10 +1,10 @@ +import functools +import time + from .cache import Cache from .decorators import cachedfunc from .lock import RLock -import functools -import time - class Link(object): @@ -56,21 +56,7 @@ class NestedTimer(object): class TTLCache(Cache): - """LRU Cache implementation with per-item time-to-live (TTL) value. - - This class associates a time-to-live value with each item. Items - that expire because they have exceeded their time-to-live will be - removed. If no expired items are there to remove, the least - recently used items will be discarded first to make space when - necessary. Trying to access an expired item will raise a - :exc:`KeyError`. - - By default, the time-to-live is specified in seconds, and the - standard :func:`time.time` function is used to retrieve the - current time. A custom `timer` function can be supplied if - needed. - - """ + """LRU Cache implementation with per-item time-to-live (TTL) value.""" def __init__(self, maxsize, ttl, timer=time.time, missing=None, getsizeof=None): @@ -179,12 +165,7 @@ class TTLCache(Cache): return cache_len(self) - expired def expire(self, time=None): - """Remove expired items from the cache. - - If `time` is not :const:`None`, remove all items whose - time-to-live would have expired by `time`. 
- - """ + """Remove expired items from the cache.""" if time is None: time = self.__timer() root = self.__root @@ -197,7 +178,10 @@ class TTLCache(Cache): head = next def popitem(self): - """Remove and return the `(key, value)` pair least recently used.""" + """Remove and return the `(key, value)` pair least recently used that + has not already expired. + + """ with self.__timer as time: self.expire(time) root = self.__root @@ -222,12 +206,12 @@ class TTLCache(Cache): @property def timer(self): - """Return the timer used by the cache.""" + """The timer function used by the cache.""" return self.__timer @property def ttl(self): - """Return the time-to-live of the cache.""" + """The time-to-live value of the cache's items.""" return self.__ttl # mixin methods diff --git a/docs/index.rst b/docs/index.rst index 49dc379..b0aaf0d 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -26,10 +26,12 @@ function decorator. For the purpose of this module, a *cache* is a mutable_ mapping_ of a fixed maximum size. When the cache is full, i.e. by adding another item the cache would exceed its maximum size, the cache must choose -which item(s) to discard based on a suitable `cache algorithm`_. A -cache's size is the sum of the size of its items, and an item's size -in general is a property or function of its value, e.g. the result of -:func:`sys.getsizeof`, or :func:`len` for string and sequence values. +which item(s) to discard based on a suitable `cache algorithm`_. In +general, a cache's size is the total size of its items, and an item's +size is a property or function of its value, e.g. the result of +``sys.getsizeof(value)``. For the trivial but common case that each +item counts as :const:`1`, irrespective of its value, a cache's size +is equal to the number of its items, or ``len(cache)``. This module provides multiple cache implementations based on different cache algorithms, as well as decorators for easily memoizing function @@ -47,13 +49,13 @@ different cache algorithms. 
All these classes derive from class current size of the cache. All cache classes accept an optional `missing` keyword argument in -their constructor, which can be used to provide a default factory -function. If a key `key` is not present, the ``cache[key]`` operation -calls :meth:`Cache.__missing__`, which in turn calls `missing` with -`key` as argument. The cache will then store the object returned from -``missing(key)`` as the new cache value for `key`, possibly discarding -other items if the cache is full. This may be used to easily provide -caching for existing single-argument functions, for example:: +their constructor, which can be used to provide a default *factory +function*. If the key `key` is not present, the ``cache[key]`` +operation calls :meth:`Cache.__missing__`, which in turn calls +`missing` with `key` as its sole argument. The cache will then store +the object returned from ``missing(key)`` as the new cache value for +`key`, possibly discarding other items if the cache is full. This may +be used provide memoization for existing single-argument functions:: from cachetools import LRUCache import urllib.request @@ -75,32 +77,76 @@ caching for existing single-argument functions, for example:: :class:`Cache` also features a :meth:`getsizeof` method, which returns -the size of a given item. The default implementation of -:meth:`getsizeof` returns :const:`1` irrespective of its `value` -argument, making the cache's size equal to the number of its items, or +the size of a given `value`. The default implementation of +:meth:`getsizeof` returns :const:`1` irrespective of its argument, +making the cache's size equal to the number of its items, or ``len(cache)``. For convenience, all cache classes accept an optional named constructor parameter `getsizeof`, which may specify a function of one argument used to retrieve the size of an item's value. .. autoclass:: Cache :members: - :exclude-members: getsize - .. method:: getsize - - .. 
deprecated:: 0.8.1 + This class discards arbitrary items using :meth:`popitem` to make + space when necessary. Derived classes may override :meth:`popitem` + to implement specific caching strategies. If a subclass has to + keep track of item access, insertion or deletion, it may + additionally need to override :meth:`__getitem__`, + :meth:`__setitem__` and :meth:`__delitem__`. If a subclass wants + to store meta data with its values, i.e. the `value` argument + passed to :meth:`Cache.__setitem__` is different from what the + derived class's :meth:`__setitem__` received, it will probably need + to override :meth:`getsizeof`, too. .. autoclass:: LFUCache :members: + This class counts how often an item is retrieved, and discards the + items used least often to make space when necessary. + .. autoclass:: LRUCache :members: -.. autoclass:: RRCache + This class discards the least recently used items first to make + space when necessary. + +.. autoclass:: RRCache(maxsize, choice=random.choice, missing=None, getsizeof=None) :members: -.. autoclass:: TTLCache + This class randomly selects candidate items and discards them to + make space when necessary. + + By default, items are selected from the list of cache keys using + :func:`random.choice`. The optional argument `choice` may specify + an alternative function that returns an arbitrary element from a + non-empty sequence. + +.. autoclass:: TTLCache(maxsize, ttl, timer=time.time, missing=None, getsizeof=None) :members: + :exclude-members: expire + + This class associates a time-to-live value with each item. Items + that expire because they have exceeded their time-to-live will be + removed automatically. If no expired items are there to remove, + the least recently used items will be discarded first to make space + when necessary. Trying to access an expired item will raise a + :exc:`KeyError`. 
+ + By default, the time-to-live is specified in seconds, and the + :func:`time.time` function is used to retrieve the current time. A + custom `timer` function can be supplied if needed. + + .. automethod:: expire(self, time=None) + + Since expired items will be "physically" removed from a cache + only at the next mutating operation, e.g. :meth:`__setitem__` or + :meth:`__delitem__`, to avoid changing the underlying dictionary + while iterating over it, expired items may still claim memory + although they are no longer accessible. Calling this method + removes all items whose time-to-live would have expired by + `time`, so garbage collection is free to reuse their memory. If + `time` is :const:`None`, this removes all items that have + expired by the current value returned by :attr:`timer`. Function Decorators @@ -147,10 +193,10 @@ The wrapped function is instrumented with :func:`cache_info` and performance and clear the cache. See the :func:`functools.lru_cache` documentation for details. -Like for :func:`functools.lru_cache`, the positional and keyword -arguments to the function must be hashable. Note that unlike -:func:`functools.lru_cache`, setting `maxsize` to :const:`None` is not -supported. +Like with :func:`functools.lru_cache`, the positional and keyword +arguments to the underlying function must be hashable. Note that +unlike :func:`functools.lru_cache`, setting `maxsize` to :const:`None` +is not supported. .. 
decorator:: lfu_cache(maxsize=128, typed=False, getsizeof=None, lock=threading.RLock) diff --git a/tests/test_cache.py b/tests/test_cache.py index 51536db..433a733 100644 --- a/tests/test_cache.py +++ b/tests/test_cache.py @@ -8,20 +8,3 @@ class CacheTest(unittest.TestCase, CacheTestMixin): def cache(self, maxsize, missing=None, getsizeof=None): return Cache(maxsize, missing=missing, getsizeof=getsizeof) - - def test_getsize(self): - # Cache.getsize is deprecated - cache = self.cache(maxsize=3, getsizeof=lambda x: x) - cache.update({1: 1, 2: 2}) - - import warnings - with warnings.catch_warnings(record=True) as w: - warnings.simplefilter("always") - self.assertEqual(1, cache.getsize(1)) - self.assertEqual(1, len(w)) - self.assertEqual(w[0].category, DeprecationWarning) - with warnings.catch_warnings(record=True) as w: - warnings.simplefilter("always") - self.assertEqual(2, cache.getsize(2)) - self.assertEqual(1, len(w)) - self.assertEqual(w[0].category, DeprecationWarning) diff --git a/tests/test_ttl.py b/tests/test_ttl.py index db9efe2..d562c1c 100644 --- a/tests/test_ttl.py +++ b/tests/test_ttl.py @@ -57,6 +57,7 @@ class TTLCacheTest(unittest.TestCase, CacheTestMixin, DecoratorTestMixin): def test_ttl(self): cache = self.cache(maxsize=2, ttl=1) + self.assertEqual(0, cache.timer()) self.assertEqual(1, cache.ttl) cache[1] = 1 @@ -116,6 +117,8 @@ class TTLCacheTest(unittest.TestCase, CacheTestMixin, DecoratorTestMixin): def test_expire(self): cache = self.cache(maxsize=3, ttl=2) + with cache.timer as time: + self.assertEqual(time, cache.timer()) self.assertEqual(2, cache.ttl) cache[1] = 1 -- cgit v1.2.3 From cedcfccaa127dbe4d4208fd13919ff68529a4193 Mon Sep 17 00:00:00 2001 From: Thomas Kemmer Date: Fri, 19 Dec 2014 18:29:11 +0100 Subject: Fix typo. 
--- docs/index.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/index.rst b/docs/index.rst index b0aaf0d..8a1dd1d 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -55,7 +55,7 @@ operation calls :meth:`Cache.__missing__`, which in turn calls `missing` with `key` as its sole argument. The cache will then store the object returned from ``missing(key)`` as the new cache value for `key`, possibly discarding other items if the cache is full. This may -be used provide memoization for existing single-argument functions:: +be used to provide memoization for existing single-argument functions:: from cachetools import LRUCache import urllib.request -- cgit v1.2.3 From 886f48e8e5e552d0d7df352ce2f0fff0130e4566 Mon Sep 17 00:00:00 2001 From: Thomas Kemmer Date: Fri, 19 Dec 2014 18:30:57 +0100 Subject: Update README.rst --- README.rst | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/README.rst b/README.rst index 770c20d..255edff 100644 --- a/README.rst +++ b/README.rst @@ -21,14 +21,15 @@ function decorator. >>> cache LRUCache([('second', 2), ('fourth', 4)], maxsize=2, currsize=2) - For the purpose of this module, a *cache* is a mutable_ mapping_ of a -fixed maximum size. When the cache is full, i.e. the size of the -cache would exceed its maximum size, the cache must choose which -item(s) to discard based on a suitable `cache algorithm`_. A cache's -size is the sum of the size of its items, and an item's size in -general is a property or function of its value, e.g. the result of -``sys.getsizeof``, or ``len`` for string and sequence values. +fixed maximum size. When the cache is full, i.e. by adding another +item the cache would exceed its maximum size, the cache must choose +which item(s) to discard based on a suitable `cache algorithm`_. In +general, a cache's size is the total size of its items, and an item's +size is a property or function of its value, e.g. the result of +``sys.getsizeof(value)``. 
For the trivial but common case that each +item counts as ``1``, irrespective of its value, a cache's size is +equal to the number of its items, or ``len(cache)``. This module provides multiple cache implementations based on different cache algorithms, as well as decorators for easily memoizing function -- cgit v1.2.3 From 03bf4174c85cef51ab07e92212a9d7838a27093b Mon Sep 17 00:00:00 2001 From: Thomas Kemmer Date: Fri, 19 Dec 2014 18:36:26 +0100 Subject: Update CHANGES.rst --- CHANGES.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGES.rst b/CHANGES.rst index 50940eb..880f17c 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -1,4 +1,4 @@ -1.0.0 UNRELEASED +1.0.0 2014-12-19 ---------------- - Provide ``RRCache.choice`` property. -- cgit v1.2.3 From 1daf0c7d278de97745854ae1b9bc38999ddad6d8 Mon Sep 17 00:00:00 2001 From: Thomas Kemmer Date: Tue, 30 Dec 2014 10:47:08 +0100 Subject: Update keywords. --- setup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup.py b/setup.py index 633bcf8..4a25ae6 100644 --- a/setup.py +++ b/setup.py @@ -17,7 +17,7 @@ setup( license='MIT', description='Extensible memoizing collections and decorators', long_description=open('README.rst').read(), - keywords='cache caching memoizing memoization LRU LFU TTL', + keywords='cache caching memoize memoizing memoization LRU LFU TTL', classifiers=[ 'Development Status :: 5 - Production/Stable', 'Environment :: Other Environment', -- cgit v1.2.3 From 41efa89faf96f026518b150b29535e383aab0b97 Mon Sep 17 00:00:00 2001 From: Thomas Kemmer Date: Sat, 23 May 2015 19:28:41 +0200 Subject: Fix #35: Emphasize the role of popitem() in cache implementations. 
--- docs/index.rst | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/docs/index.rst b/docs/index.rst index 8a1dd1d..d7de83d 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -44,9 +44,11 @@ Cache Implementations This module provides several classes implementing caches using different cache algorithms. All these classes derive from class :class:`Cache`, which in turn derives from -:class:`collections.MutableMapping`, providing additional properties -:attr:`maxsize` and :attr:`currsize` to retrieve the maximum and -current size of the cache. +:class:`collections.MutableMapping`, and provide :attr:`maxsize` and +:attr:`currsize` properties to retrieve the maximum and current size +of the cache. When a cache is full, :meth:`setitem` calls +:meth:`popitem` repeatedly until there is enough room for the item to +be added. All cache classes accept an optional `missing` keyword argument in their constructor, which can be used to provide a default *factory -- cgit v1.2.3 From 7827ec015a551f11c3ac55e3583b8592c4ae88d4 Mon Sep 17 00:00:00 2001 From: Thomas Kemmer Date: Sat, 23 May 2015 21:09:38 +0200 Subject: Fix #32: Add docs/tests for using @cachedmethod with generic mutable mappings. --- docs/index.rst | 14 +++++---- tests/test_cachedmethod.py | 73 ++++++++++++++++++++++++++++++++++++++++------ 2 files changed, 73 insertions(+), 14 deletions(-) diff --git a/docs/index.rst b/docs/index.rst index d7de83d..fe9a459 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -235,11 +235,15 @@ Method Decorators instance or class. If `cache(self)` returns :const:`None`, the original underlying method is called directly and the result is not cached. The `cache` function is also available as the wrapped - function's :attr:`cache` attribute. - - Multiple methods of an object or class may share the same cache - object, but it is the user's responsibility to handle concurrent - cache access in a multi-threaded environment. + function's :attr:`cache` attribute. 
Multiple methods of an object + or class may share the same cache object, but it is the user's + responsibility to handle concurrent cache access in a + multi-threaded environment. + + Note that the objects returned from `cache` are not required to be + instances of the cache implementations provided by this module. + :func:`cachedmethod` should work with any mutable mapping type, be + it plain :class:`dict` or :class:`weakref.WeakValueDictionary`. One advantage of this decorator over the similar function decorators is that cache properties such as `maxsize` can be set at diff --git a/tests/test_cachedmethod.py b/tests/test_cachedmethod.py index bf4dac2..1440352 100644 --- a/tests/test_cachedmethod.py +++ b/tests/test_cachedmethod.py @@ -8,8 +8,9 @@ class Cached(object): count = 0 - def __init__(self, cache): + def __init__(self, cache, count=0): self.cache = cache + self.count = count @cachedmethod(operator.attrgetter('cache')) def get(self, value): @@ -26,7 +27,32 @@ class Cached(object): class CachedMethodTest(unittest.TestCase): - def test_decorator(self): + def test_dict(self): + cached = Cached({}) + self.assertEqual(cached.cache, cached.get.cache(cached)) + + self.assertEqual(cached.get(0), 0) + self.assertEqual(cached.get(1), 1) + self.assertEqual(cached.get(1), 1) + self.assertEqual(cached.get(1.0), 1) + self.assertEqual(cached.get(1.0), 1) + + cached.cache.clear() + self.assertEqual(cached.get(1), 2) + + def test_typed_dict(self): + cached = Cached(LRUCache(maxsize=2)) + self.assertEqual(cached.cache, cached.get_typed.cache(cached)) + + self.assertEqual(cached.get_typed(0), 0) + self.assertEqual(cached.get_typed(1), 1) + self.assertEqual(cached.get_typed(1), 1) + self.assertEqual(cached.get_typed(1.0), 2) + self.assertEqual(cached.get_typed(1.0), 2) + self.assertEqual(cached.get_typed(0.0), 3) + self.assertEqual(cached.get_typed(0), 4) + + def test_lru(self): cached = Cached(LRUCache(maxsize=2)) self.assertEqual(cached.cache, cached.get.cache(cached)) @@ 
-39,7 +65,7 @@ class CachedMethodTest(unittest.TestCase): cached.cache.clear() self.assertEqual(cached.get(1), 2) - def test_typed_decorator(self): + def test_typed_lru(self): cached = Cached(LRUCache(maxsize=2)) self.assertEqual(cached.cache, cached.get_typed.cache(cached)) @@ -51,9 +77,9 @@ class CachedMethodTest(unittest.TestCase): self.assertEqual(cached.get_typed(0.0), 3) self.assertEqual(cached.get_typed(0), 4) - def test_decorator_nocache(self): - cached = Cached(None) - self.assertEqual(None, cached.get.cache(cached)) + def test_nospace(self): + cached = Cached(LRUCache(maxsize=0)) + self.assertEqual(cached.cache, cached.get.cache(cached)) self.assertEqual(cached.get(0), 0) self.assertEqual(cached.get(1), 1) @@ -61,12 +87,41 @@ class CachedMethodTest(unittest.TestCase): self.assertEqual(cached.get(1.0), 3) self.assertEqual(cached.get(1.0), 4) - def test_decorator_toolarge(self): - cached = Cached(LRUCache(maxsize=0)) - self.assertEqual(cached.cache, cached.get.cache(cached)) + def test_nocache(self): + cached = Cached(None) + self.assertEqual(None, cached.get.cache(cached)) self.assertEqual(cached.get(0), 0) self.assertEqual(cached.get(1), 1) self.assertEqual(cached.get(1), 2) self.assertEqual(cached.get(1.0), 3) self.assertEqual(cached.get(1.0), 4) + + def test_weakref(self): + import weakref + import fractions + + # in Python 3.4, `int` does not support weak references even + # when subclassed, but Fraction apparently does... 
+ class Int(fractions.Fraction): + def __add__(self, other): + return Int(fractions.Fraction.__add__(self, other)) + + cached = Cached(weakref.WeakValueDictionary(), Int(0)) + self.assertEqual(cached.cache, cached.get.cache(cached)) + + self.assertEqual(cached.get(0), 0) + self.assertEqual(cached.get(0), 1) + + ref = cached.get(1) + self.assertEqual(ref, 2) + self.assertEqual(cached.get(1), 2) + self.assertEqual(cached.get(1.0), 2) + + ref = cached.get_typed(1) + self.assertEqual(ref, 3) + self.assertEqual(cached.get_typed(1), 3) + self.assertEqual(cached.get_typed(1.0), 4) + + cached.cache.clear() + self.assertEqual(cached.get(1), 5) -- cgit v1.2.3 From ec80673eb18a43f2f0299235e4b625b30322b664 Mon Sep 17 00:00:00 2001 From: Thomas Kemmer Date: Sat, 6 Jun 2015 14:24:51 +0200 Subject: Fix #33: Code cleanup for improved PEP 8 conformance. --- cachetools/__init__.py | 11 ++-- cachetools/cache.py | 6 +-- cachetools/decorators.py | 97 ---------------------------------- cachetools/func.py | 120 ++++++++++++++++++++++++++++++++++++++++++ cachetools/lfu.py | 11 ---- cachetools/lock.py | 6 --- cachetools/lru.py | 11 ---- cachetools/method.py | 42 +++++++++++++++ cachetools/rr.py | 12 ----- cachetools/ttl.py | 11 ---- tests/test_cache.py | 3 +- tests/test_cachedmethod.py | 127 --------------------------------------------- tests/test_lfu.py | 3 +- tests/test_lru.py | 3 +- tests/test_method.py | 127 +++++++++++++++++++++++++++++++++++++++++++++ tests/test_rr.py | 5 +- tests/test_ttl.py | 3 +- 17 files changed, 307 insertions(+), 291 deletions(-) delete mode 100644 cachetools/decorators.py create mode 100644 cachetools/func.py delete mode 100644 cachetools/lock.py create mode 100644 cachetools/method.py delete mode 100644 tests/test_cachedmethod.py create mode 100644 tests/test_method.py diff --git a/cachetools/__init__.py b/cachetools/__init__.py index 6093731..14a068c 100644 --- a/cachetools/__init__.py +++ b/cachetools/__init__.py @@ -1,11 +1,12 @@ """Extensible memoizing 
collections and decorators""" from .cache import Cache -from .decorators import cachedmethod -from .lfu import LFUCache, lfu_cache -from .lru import LRUCache, lru_cache -from .rr import RRCache, rr_cache -from .ttl import TTLCache, ttl_cache +from .func import lfu_cache, lru_cache, rr_cache, ttl_cache +from .lfu import LFUCache +from .lru import LRUCache +from .method import cachedmethod +from .rr import RRCache +from .ttl import TTLCache __all__ = ( 'Cache', diff --git a/cachetools/cache.py b/cachetools/cache.py index 0c692f9..edb4986 100644 --- a/cachetools/cache.py +++ b/cachetools/cache.py @@ -1,10 +1,6 @@ import collections -def one(value): - return 1 - - class Cache(collections.MutableMapping): """Mutable mapping to serve as a simple cache or cache base class.""" @@ -13,7 +9,7 @@ class Cache(collections.MutableMapping): self.__currsize = 0 self.__maxsize = maxsize self.__missing = missing - self.__getsizeof = getsizeof or one + self.__getsizeof = getsizeof or (lambda x: 1) def __repr__(self): return '%s(%r, maxsize=%d, currsize=%d)' % ( diff --git a/cachetools/decorators.py b/cachetools/decorators.py deleted file mode 100644 index 7299251..0000000 --- a/cachetools/decorators.py +++ /dev/null @@ -1,97 +0,0 @@ -import collections -import functools - -CacheInfo = collections.namedtuple('CacheInfo', 'hits misses maxsize currsize') - - -class NullContext: - def __enter__(self): - pass - - def __exit__(self, exc_type, exc_val, exc_tb): - pass - -nullcontext = NullContext() - - -def makekey_untyped(args, kwargs): - return (args, tuple(sorted(kwargs.items()))) - - -def makekey_typed(args, kwargs): - key = makekey_untyped(args, kwargs) - key += tuple(type(v) for v in args) - key += tuple(type(v) for _, v in sorted(kwargs.items())) - return key - - -def cachedfunc(cache, typed=False, lock=None): - makekey = makekey_typed if typed else makekey_untyped - context = lock() if lock else nullcontext - - def decorator(func): - stats = [0, 0] - - def wrapper(*args, **kwargs): 
- key = makekey(args, kwargs) - with context: - try: - result = cache[key] - stats[0] += 1 - return result - except KeyError: - stats[1] += 1 - result = func(*args, **kwargs) - with context: - try: - cache[key] = result - except ValueError: - pass # value too large - return result - - def cache_info(): - with context: - hits, misses = stats - maxsize = cache.maxsize - currsize = cache.currsize - return CacheInfo(hits, misses, maxsize, currsize) - - def cache_clear(): - with context: - cache.clear() - - wrapper.cache_info = cache_info - wrapper.cache_clear = cache_clear - return functools.update_wrapper(wrapper, func) - - return decorator - - -def cachedmethod(cache, typed=False): - """Decorator to wrap a class or instance method with a memoizing - callable that saves results in a (possibly shared) cache. - - """ - makekey = makekey_typed if typed else makekey_untyped - - def decorator(method): - def wrapper(self, *args, **kwargs): - mapping = cache(self) - if mapping is None: - return method(self, *args, **kwargs) - key = makekey((method,) + args, kwargs) - try: - return mapping[key] - except KeyError: - pass - result = method(self, *args, **kwargs) - try: - mapping[key] = result - except ValueError: - pass # value too large - return result - - wrapper.cache = cache - return functools.update_wrapper(wrapper, method) - - return decorator diff --git a/cachetools/func.py b/cachetools/func.py new file mode 100644 index 0000000..4d88a78 --- /dev/null +++ b/cachetools/func.py @@ -0,0 +1,120 @@ +import collections +import functools +import random +import time + +from .lfu import LFUCache +from .lru import LRUCache +from .rr import RRCache +from .ttl import TTLCache + +try: + from threading import RLock +except ImportError: + from dummy_threading import RLock + + +_CacheInfo = collections.namedtuple('CacheInfo', [ + 'hits', 'misses', 'maxsize', 'currsize' +]) + + +class _NullContext: + def __enter__(self): + pass + + def __exit__(self, exc_type, exc_val, exc_tb): + pass + 
+ +_nullcontext = _NullContext() + + +def _makekey_untyped(args, kwargs): + return (args, tuple(sorted(kwargs.items()))) + + +def _makekey_typed(args, kwargs): + key = _makekey_untyped(args, kwargs) + key += tuple(type(v) for v in args) + key += tuple(type(v) for _, v in sorted(kwargs.items())) + return key + + +def _cachedfunc(cache, typed=False, lock=None): + makekey = _makekey_typed if typed else _makekey_untyped + context = lock() if lock else _nullcontext + + def decorator(func): + stats = [0, 0] + + def wrapper(*args, **kwargs): + key = makekey(args, kwargs) + with context: + try: + result = cache[key] + stats[0] += 1 + return result + except KeyError: + stats[1] += 1 + result = func(*args, **kwargs) + with context: + try: + cache[key] = result + except ValueError: + pass # value too large + return result + + def cache_info(): + with context: + hits, misses = stats + maxsize = cache.maxsize + currsize = cache.currsize + return _CacheInfo(hits, misses, maxsize, currsize) + + def cache_clear(): + with context: + cache.clear() + + wrapper.cache_info = cache_info + wrapper.cache_clear = cache_clear + return functools.update_wrapper(wrapper, func) + + return decorator + + +def lfu_cache(maxsize=128, typed=False, getsizeof=None, lock=RLock): + """Decorator to wrap a function with a memoizing callable that saves + up to `maxsize` results based on a Least Frequently Used (LFU) + algorithm. + + """ + return _cachedfunc(LFUCache(maxsize, getsizeof), typed, lock) + + +def lru_cache(maxsize=128, typed=False, getsizeof=None, lock=RLock): + """Decorator to wrap a function with a memoizing callable that saves + up to `maxsize` results based on a Least Recently Used (LRU) + algorithm. 
+ + """ + return _cachedfunc(LRUCache(maxsize, getsizeof), typed, lock) + + +def rr_cache(maxsize=128, choice=random.choice, typed=False, getsizeof=None, + lock=RLock): + """Decorator to wrap a function with a memoizing callable that saves + up to `maxsize` results based on a Random Replacement (RR) + algorithm. + + """ + return _cachedfunc(RRCache(maxsize, choice, getsizeof), typed, lock) + + +def ttl_cache(maxsize=128, ttl=600, timer=time.time, typed=False, + getsizeof=None, lock=RLock): + """Decorator to wrap a function with a memoizing callable that saves + up to `maxsize` results based on a Least Recently Used (LRU) + algorithm with a per-item time-to-live (TTL) value. + """ + return _cachedfunc(TTLCache(maxsize, ttl, timer, getsizeof), typed, lock) diff --git a/cachetools/lfu.py b/cachetools/lfu.py index 0afa9c9..d163cba 100644 --- a/cachetools/lfu.py +++ b/cachetools/lfu.py @@ -2,8 +2,6 @@ import collections import operator from .cache import Cache -from .decorators import cachedfunc -from .lock import RLock class LFUCache(Cache): @@ -33,12 +31,3 @@ class LFUCache(Cache): except ValueError: raise KeyError('cache is empty') return key, self.pop(key) - - -def lfu_cache(maxsize=128, typed=False, getsizeof=None, lock=RLock): - """Decorator to wrap a function with a memoizing callable that saves - up to `maxsize` results based on a Least Frequently Used (LFU) - algorithm. 
- - """ - return cachedfunc(LFUCache(maxsize, getsizeof), typed, lock) diff --git a/cachetools/lock.py b/cachetools/lock.py deleted file mode 100644 index 33c7c89..0000000 --- a/cachetools/lock.py +++ /dev/null @@ -1,6 +0,0 @@ -# flake8: noqa - -try: - from threading import RLock -except ImportError: - from dummy_threading import RLock diff --git a/cachetools/lru.py b/cachetools/lru.py index dbc9786..67ac797 100644 --- a/cachetools/lru.py +++ b/cachetools/lru.py @@ -1,6 +1,4 @@ from .cache import Cache -from .decorators import cachedfunc -from .lock import RLock class Link(object): @@ -85,12 +83,3 @@ class LRUCache(Cache): Cache.__delitem__(self, key) link.unlink() return (key, link.value) - - -def lru_cache(maxsize=128, typed=False, getsizeof=None, lock=RLock): - """Decorator to wrap a function with a memoizing callable that saves - up to `maxsize` results based on a Least Recently Used (LRU) - algorithm. - - """ - return cachedfunc(LRUCache(maxsize, getsizeof), typed, lock) diff --git a/cachetools/method.py b/cachetools/method.py new file mode 100644 index 0000000..2d04f44 --- /dev/null +++ b/cachetools/method.py @@ -0,0 +1,42 @@ +import functools + + +def _makekey_untyped(method, args, kwargs): + return (method, args, tuple(sorted(kwargs.items()))) + + +def _makekey_typed(method, args, kwargs): + key = _makekey_untyped(method, args, kwargs) + key += tuple(type(v) for v in args) + key += tuple(type(v) for _, v in sorted(kwargs.items())) + return key + + +def cachedmethod(cache, typed=False): + """Decorator to wrap a class or instance method with a memoizing + callable that saves results in a (possibly shared) cache. 
+ + """ + makekey = _makekey_typed if typed else _makekey_untyped + + def decorator(method): + def wrapper(self, *args, **kwargs): + mapping = cache(self) + if mapping is None: + return method(self, *args, **kwargs) + key = makekey(method, args, kwargs) + try: + return mapping[key] + except KeyError: + pass + result = method(self, *args, **kwargs) + try: + mapping[key] = result + except ValueError: + pass # value too large + return result + + wrapper.cache = cache + return functools.update_wrapper(wrapper, method) + + return decorator diff --git a/cachetools/rr.py b/cachetools/rr.py index 5119c48..143223b 100644 --- a/cachetools/rr.py +++ b/cachetools/rr.py @@ -1,8 +1,6 @@ import random from .cache import Cache -from .decorators import cachedfunc -from .lock import RLock class RRCache(Cache): @@ -25,13 +23,3 @@ class RRCache(Cache): def choice(self): """The `choice` function used by the cache.""" return self.__choice - - -def rr_cache(maxsize=128, choice=random.choice, typed=False, getsizeof=None, - lock=RLock): - """Decorator to wrap a function with a memoizing callable that saves - up to `maxsize` results based on a Random Replacement (RR) - algorithm. - - """ - return cachedfunc(RRCache(maxsize, choice, getsizeof), typed, lock) diff --git a/cachetools/ttl.py b/cachetools/ttl.py index db35aa3..73c545a 100644 --- a/cachetools/ttl.py +++ b/cachetools/ttl.py @@ -2,8 +2,6 @@ import functools import time from .cache import Cache -from .decorators import cachedfunc -from .lock import RLock class Link(object): @@ -225,12 +223,3 @@ class TTLCache(Cache): get = __nested(Cache.get) pop = __nested(Cache.pop) setdefault = __nested(Cache.setdefault) - - -def ttl_cache(maxsize=128, ttl=600, timer=time.time, typed=False, - getsizeof=None, lock=RLock): - """Decorator to wrap a function with a memoizing callable that saves - up to `maxsize` results based on a Least Recently Used (LRU) - algorithm with a per-item time-to-live (TTL) value. 
- """ - return cachedfunc(TTLCache(maxsize, ttl, timer, getsizeof), typed, lock) diff --git a/tests/test_cache.py b/tests/test_cache.py index 433a733..3b78515 100644 --- a/tests/test_cache.py +++ b/tests/test_cache.py @@ -1,8 +1,9 @@ import unittest -from . import CacheTestMixin from cachetools import Cache +from . import CacheTestMixin + class CacheTest(unittest.TestCase, CacheTestMixin): diff --git a/tests/test_cachedmethod.py b/tests/test_cachedmethod.py deleted file mode 100644 index 1440352..0000000 --- a/tests/test_cachedmethod.py +++ /dev/null @@ -1,127 +0,0 @@ -import unittest -import operator - -from cachetools import LRUCache, cachedmethod - - -class Cached(object): - - count = 0 - - def __init__(self, cache, count=0): - self.cache = cache - self.count = count - - @cachedmethod(operator.attrgetter('cache')) - def get(self, value): - count = self.count - self.count += 1 - return count - - @cachedmethod(operator.attrgetter('cache'), typed=True) - def get_typed(self, value): - count = self.count - self.count += 1 - return count - - -class CachedMethodTest(unittest.TestCase): - - def test_dict(self): - cached = Cached({}) - self.assertEqual(cached.cache, cached.get.cache(cached)) - - self.assertEqual(cached.get(0), 0) - self.assertEqual(cached.get(1), 1) - self.assertEqual(cached.get(1), 1) - self.assertEqual(cached.get(1.0), 1) - self.assertEqual(cached.get(1.0), 1) - - cached.cache.clear() - self.assertEqual(cached.get(1), 2) - - def test_typed_dict(self): - cached = Cached(LRUCache(maxsize=2)) - self.assertEqual(cached.cache, cached.get_typed.cache(cached)) - - self.assertEqual(cached.get_typed(0), 0) - self.assertEqual(cached.get_typed(1), 1) - self.assertEqual(cached.get_typed(1), 1) - self.assertEqual(cached.get_typed(1.0), 2) - self.assertEqual(cached.get_typed(1.0), 2) - self.assertEqual(cached.get_typed(0.0), 3) - self.assertEqual(cached.get_typed(0), 4) - - def test_lru(self): - cached = Cached(LRUCache(maxsize=2)) - self.assertEqual(cached.cache, 
cached.get.cache(cached)) - - self.assertEqual(cached.get(0), 0) - self.assertEqual(cached.get(1), 1) - self.assertEqual(cached.get(1), 1) - self.assertEqual(cached.get(1.0), 1) - self.assertEqual(cached.get(1.0), 1) - - cached.cache.clear() - self.assertEqual(cached.get(1), 2) - - def test_typed_lru(self): - cached = Cached(LRUCache(maxsize=2)) - self.assertEqual(cached.cache, cached.get_typed.cache(cached)) - - self.assertEqual(cached.get_typed(0), 0) - self.assertEqual(cached.get_typed(1), 1) - self.assertEqual(cached.get_typed(1), 1) - self.assertEqual(cached.get_typed(1.0), 2) - self.assertEqual(cached.get_typed(1.0), 2) - self.assertEqual(cached.get_typed(0.0), 3) - self.assertEqual(cached.get_typed(0), 4) - - def test_nospace(self): - cached = Cached(LRUCache(maxsize=0)) - self.assertEqual(cached.cache, cached.get.cache(cached)) - - self.assertEqual(cached.get(0), 0) - self.assertEqual(cached.get(1), 1) - self.assertEqual(cached.get(1), 2) - self.assertEqual(cached.get(1.0), 3) - self.assertEqual(cached.get(1.0), 4) - - def test_nocache(self): - cached = Cached(None) - self.assertEqual(None, cached.get.cache(cached)) - - self.assertEqual(cached.get(0), 0) - self.assertEqual(cached.get(1), 1) - self.assertEqual(cached.get(1), 2) - self.assertEqual(cached.get(1.0), 3) - self.assertEqual(cached.get(1.0), 4) - - def test_weakref(self): - import weakref - import fractions - - # in Python 3.4, `int` does not support weak references even - # when subclassed, but Fraction apparently does... 
- class Int(fractions.Fraction): - def __add__(self, other): - return Int(fractions.Fraction.__add__(self, other)) - - cached = Cached(weakref.WeakValueDictionary(), Int(0)) - self.assertEqual(cached.cache, cached.get.cache(cached)) - - self.assertEqual(cached.get(0), 0) - self.assertEqual(cached.get(0), 1) - - ref = cached.get(1) - self.assertEqual(ref, 2) - self.assertEqual(cached.get(1), 2) - self.assertEqual(cached.get(1.0), 2) - - ref = cached.get_typed(1) - self.assertEqual(ref, 3) - self.assertEqual(cached.get_typed(1), 3) - self.assertEqual(cached.get_typed(1.0), 4) - - cached.cache.clear() - self.assertEqual(cached.get(1), 5) diff --git a/tests/test_lfu.py b/tests/test_lfu.py index 3b69647..1c4741b 100644 --- a/tests/test_lfu.py +++ b/tests/test_lfu.py @@ -1,8 +1,9 @@ import unittest -from . import CacheTestMixin, DecoratorTestMixin from cachetools import LFUCache, lfu_cache +from . import CacheTestMixin, DecoratorTestMixin + class LFUCacheTest(unittest.TestCase, CacheTestMixin, DecoratorTestMixin): diff --git a/tests/test_lru.py b/tests/test_lru.py index f21170d..5eea036 100644 --- a/tests/test_lru.py +++ b/tests/test_lru.py @@ -1,8 +1,9 @@ import unittest -from . import CacheTestMixin, DecoratorTestMixin from cachetools import LRUCache, lru_cache +from . 
import CacheTestMixin, DecoratorTestMixin + class LRUCacheTest(unittest.TestCase, CacheTestMixin, DecoratorTestMixin): diff --git a/tests/test_method.py b/tests/test_method.py new file mode 100644 index 0000000..82337e0 --- /dev/null +++ b/tests/test_method.py @@ -0,0 +1,127 @@ +import operator +import unittest + +from cachetools import LRUCache, cachedmethod + + +class Cached(object): + + count = 0 + + def __init__(self, cache, count=0): + self.cache = cache + self.count = count + + @cachedmethod(operator.attrgetter('cache')) + def get(self, value): + count = self.count + self.count += 1 + return count + + @cachedmethod(operator.attrgetter('cache'), typed=True) + def get_typed(self, value): + count = self.count + self.count += 1 + return count + + +class CachedMethodTest(unittest.TestCase): + + def test_dict(self): + cached = Cached({}) + self.assertEqual(cached.cache, cached.get.cache(cached)) + + self.assertEqual(cached.get(0), 0) + self.assertEqual(cached.get(1), 1) + self.assertEqual(cached.get(1), 1) + self.assertEqual(cached.get(1.0), 1) + self.assertEqual(cached.get(1.0), 1) + + cached.cache.clear() + self.assertEqual(cached.get(1), 2) + + def test_typed_dict(self): + cached = Cached(LRUCache(maxsize=2)) + self.assertEqual(cached.cache, cached.get_typed.cache(cached)) + + self.assertEqual(cached.get_typed(0), 0) + self.assertEqual(cached.get_typed(1), 1) + self.assertEqual(cached.get_typed(1), 1) + self.assertEqual(cached.get_typed(1.0), 2) + self.assertEqual(cached.get_typed(1.0), 2) + self.assertEqual(cached.get_typed(0.0), 3) + self.assertEqual(cached.get_typed(0), 4) + + def test_lru(self): + cached = Cached(LRUCache(maxsize=2)) + self.assertEqual(cached.cache, cached.get.cache(cached)) + + self.assertEqual(cached.get(0), 0) + self.assertEqual(cached.get(1), 1) + self.assertEqual(cached.get(1), 1) + self.assertEqual(cached.get(1.0), 1) + self.assertEqual(cached.get(1.0), 1) + + cached.cache.clear() + self.assertEqual(cached.get(1), 2) + + def 
test_typed_lru(self): + cached = Cached(LRUCache(maxsize=2)) + self.assertEqual(cached.cache, cached.get_typed.cache(cached)) + + self.assertEqual(cached.get_typed(0), 0) + self.assertEqual(cached.get_typed(1), 1) + self.assertEqual(cached.get_typed(1), 1) + self.assertEqual(cached.get_typed(1.0), 2) + self.assertEqual(cached.get_typed(1.0), 2) + self.assertEqual(cached.get_typed(0.0), 3) + self.assertEqual(cached.get_typed(0), 4) + + def test_nospace(self): + cached = Cached(LRUCache(maxsize=0)) + self.assertEqual(cached.cache, cached.get.cache(cached)) + + self.assertEqual(cached.get(0), 0) + self.assertEqual(cached.get(1), 1) + self.assertEqual(cached.get(1), 2) + self.assertEqual(cached.get(1.0), 3) + self.assertEqual(cached.get(1.0), 4) + + def test_nocache(self): + cached = Cached(None) + self.assertEqual(None, cached.get.cache(cached)) + + self.assertEqual(cached.get(0), 0) + self.assertEqual(cached.get(1), 1) + self.assertEqual(cached.get(1), 2) + self.assertEqual(cached.get(1.0), 3) + self.assertEqual(cached.get(1.0), 4) + + def test_weakref(self): + import weakref + import fractions + + # in Python 3.4, `int` does not support weak references even + # when subclassed, but Fraction apparently does... 
+ class Int(fractions.Fraction): + def __add__(self, other): + return Int(fractions.Fraction.__add__(self, other)) + + cached = Cached(weakref.WeakValueDictionary(), Int(0)) + self.assertEqual(cached.cache, cached.get.cache(cached)) + + self.assertEqual(cached.get(0), 0) + self.assertEqual(cached.get(0), 1) + + ref = cached.get(1) + self.assertEqual(ref, 2) + self.assertEqual(cached.get(1), 2) + self.assertEqual(cached.get(1.0), 2) + + ref = cached.get_typed(1) + self.assertEqual(ref, 3) + self.assertEqual(cached.get_typed(1), 3) + self.assertEqual(cached.get_typed(1.0), 4) + + cached.cache.clear() + self.assertEqual(cached.get(1), 5) diff --git a/tests/test_rr.py b/tests/test_rr.py index 7af9863..b6d2f2c 100644 --- a/tests/test_rr.py +++ b/tests/test_rr.py @@ -1,9 +1,10 @@ -import unittest import random +import unittest -from . import CacheTestMixin, DecoratorTestMixin from cachetools import RRCache, rr_cache +from . import CacheTestMixin, DecoratorTestMixin + class RRCacheTest(unittest.TestCase, CacheTestMixin, DecoratorTestMixin): diff --git a/tests/test_ttl.py b/tests/test_ttl.py index d562c1c..c6253c2 100644 --- a/tests/test_ttl.py +++ b/tests/test_ttl.py @@ -1,8 +1,9 @@ import unittest -from . import CacheTestMixin, DecoratorTestMixin from cachetools import TTLCache, ttl_cache +from . import CacheTestMixin, DecoratorTestMixin + class Timer: def __init__(self, auto=False): -- cgit v1.2.3 From b26d1e5cedbbf7965538fe3c2f07f2aa40a395d0 Mon Sep 17 00:00:00 2001 From: Thomas Kemmer Date: Sat, 6 Jun 2015 15:20:02 +0200 Subject: Prepare v1.0.1. --- CHANGES.rst | 11 +++++++++++ cachetools/__init__.py | 2 +- 2 files changed, 12 insertions(+), 1 deletion(-) diff --git a/CHANGES.rst b/CHANGES.rst index 880f17c..915a939 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -1,3 +1,14 @@ +1.0.1 2015-06-06 +---------------- + +- Code cleanup for improved PEP 8 conformance. + +- Add documentation and unit tests for using ``@cachedmethod`` with + generic mutable mappings. 
+ +- Improve documentation. + + 1.0.0 2014-12-19 ---------------- diff --git a/cachetools/__init__.py b/cachetools/__init__.py index 14a068c..b1360a1 100644 --- a/cachetools/__init__.py +++ b/cachetools/__init__.py @@ -15,4 +15,4 @@ __all__ = ( 'lfu_cache', 'lru_cache', 'rr_cache', 'ttl_cache', ) -__version__ = '1.0.0' +__version__ = '1.0.1' -- cgit v1.2.3 From 87753fea0f7f96cea89629d492a82292356ab350 Mon Sep 17 00:00:00 2001 From: Thomas Kemmer Date: Sat, 6 Jun 2015 15:26:03 +0200 Subject: Update copyright. --- LICENSE | 2 +- README.rst | 2 +- docs/conf.py | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/LICENSE b/LICENSE index 380c344..aa77426 100644 --- a/LICENSE +++ b/LICENSE @@ -1,6 +1,6 @@ The MIT License (MIT) -Copyright (c) 2014 Thomas Kemmer +Copyright (c) 2014, 2015 Thomas Kemmer Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in diff --git a/README.rst b/README.rst index 255edff..284bd30 100644 --- a/README.rst +++ b/README.rst @@ -72,7 +72,7 @@ Project Resources License ------------------------------------------------------------------------ -Copyright (c) 2014 Thomas Kemmer. +Copyright (c) 2014, 2015 Thomas Kemmer. Licensed under the `MIT License`_. diff --git a/docs/conf.py b/docs/conf.py index bcfbeed..d675a0f 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -44,7 +44,7 @@ master_doc = 'index' # General information about the project. project = u'cachetools' -copyright = u'2014, Thomas Kemmer' +copyright = u'2014, 2015 Thomas Kemmer' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the -- cgit v1.2.3 From 7bc51c6ddeed0c644ed52ef23fdfaf10f24dc531 Mon Sep 17 00:00:00 2001 From: Thomas Kemmer Date: Tue, 9 Jun 2015 07:27:57 +0200 Subject: Fix #41: Refactor getsizeof and missing default implementation. 
--- cachetools/cache.py | 24 +++++++++++++++--------- 1 file changed, 15 insertions(+), 9 deletions(-) diff --git a/cachetools/cache.py b/cachetools/cache.py index edb4986..fec79d3 100644 --- a/cachetools/cache.py +++ b/cachetools/cache.py @@ -8,8 +8,10 @@ class Cache(collections.MutableMapping): self.__data = dict() self.__currsize = 0 self.__maxsize = maxsize - self.__missing = missing - self.__getsizeof = getsizeof or (lambda x: 1) + if missing: + self.__missing = missing + if getsizeof: + self.__getsizeof = getsizeof def __repr__(self): return '%s(%r, maxsize=%d, currsize=%d)' % ( @@ -49,13 +51,9 @@ class Cache(collections.MutableMapping): return key in self.__data def __missing__(self, key): - missing = self.__missing - if missing: - # return value as stored in data! - self.__setitem__(key, missing(key)) - return self.__data[key][0] - else: - raise KeyError(key) + self.__setitem__(key, self.__missing(key)) + # return value as stored in data + return self.__data[key][0] def __iter__(self): return iter(self.__data) @@ -63,6 +61,14 @@ class Cache(collections.MutableMapping): def __len__(self): return len(self.__data) + @staticmethod + def __getsizeof(value): + return 1 + + @staticmethod + def __missing(key): + raise KeyError(key) + @property def maxsize(self): """The maximum size of the cache.""" -- cgit v1.2.3 From 385ace836f2eb0f8979d62c037e25ea95de22f84 Mon Sep 17 00:00:00 2001 From: Thomas Kemmer Date: Wed, 17 Jun 2015 20:58:57 +0200 Subject: Fix #40: Allow simple cache instances to be pickled. 
--- cachetools/cache.py | 2 +- cachetools/lru.py | 20 ++++++++------ cachetools/ttl.py | 80 ++++++++++++++++++++++++++++------------------------- tests/__init__.py | 24 ++++++++++++++++ tests/test_rr.py | 8 ++++-- 5 files changed, 85 insertions(+), 49 deletions(-) diff --git a/cachetools/cache.py b/cachetools/cache.py index fec79d3..f0f8d5c 100644 --- a/cachetools/cache.py +++ b/cachetools/cache.py @@ -30,7 +30,7 @@ class Cache(collections.MutableMapping): def __setitem__(self, key, value): data = self.__data maxsize = self.__maxsize - size = self.__getsizeof(value) + size = self.getsizeof(value) if size > maxsize: raise ValueError('value too large') if key not in data or data[key][1] < size: diff --git a/cachetools/lru.py b/cachetools/lru.py index 67ac797..606d87e 100644 --- a/cachetools/lru.py +++ b/cachetools/lru.py @@ -1,7 +1,7 @@ from .cache import Cache -class Link(object): +class _Link(object): __slots__ = 'key', 'value', 'prev', 'next' @@ -16,13 +16,8 @@ class LRUCache(Cache): """Least Recently Used (LRU) cache implementation.""" def __init__(self, maxsize, missing=None, getsizeof=None): - if getsizeof is not None: - getlinksize = lambda link: getsizeof(link.value) - Cache.__init__(self, maxsize, missing, getlinksize) - self.getsizeof = getsizeof - else: - Cache.__init__(self, maxsize, missing) - self.__root = root = Link() + Cache.__init__(self, maxsize, missing, getsizeof) + root = self.__root = _Link() root.prev = root.next = root def __repr__(self, cache_getitem=Cache.__getitem__): @@ -53,7 +48,7 @@ class LRUCache(Cache): oldlink = cache_getitem(self, key) else: oldlink = None - link = Link() + link = _Link() link.key = key link.value = value cache_setitem(self, key, link) @@ -73,6 +68,13 @@ class LRUCache(Cache): cache_delitem(self, key) link.unlink() + def getsizeof(self, value): + """Return the size of a cache element's value.""" + if isinstance(value, _Link): + return Cache.getsizeof(self, value.value) + else: + return Cache.getsizeof(self, value) + 
def popitem(self): """Remove and return the `(key, value)` pair least recently used.""" root = self.__root diff --git a/cachetools/ttl.py b/cachetools/ttl.py index 73c545a..d9ef173 100644 --- a/cachetools/ttl.py +++ b/cachetools/ttl.py @@ -4,7 +4,7 @@ import time from .cache import Cache -class Link(object): +class _Link(object): __slots__ = ( 'key', 'value', 'expire', 'size', @@ -12,9 +12,6 @@ class Link(object): 'lru_prev', 'lru_next' ) - def getsize(self): - return self.size - def unlink(self): ttl_next = self.ttl_next ttl_prev = self.ttl_prev @@ -27,7 +24,7 @@ class Link(object): lru_next.lru_prev = lru_prev -class NestedTimer(object): +class _NestedTimer(object): def __init__(self, timer): self.__timer = timer @@ -49,24 +46,25 @@ class NestedTimer(object): return self.__time def __getattr__(self, name): - # FIXME: for unittests timer.tick() return getattr(self.__timer, name) + def __getstate__(self): + return (self.__timer, self.__nesting) + + def __setstate__(self, state): + self.__timer, self.__nesting = state + class TTLCache(Cache): """LRU Cache implementation with per-item time-to-live (TTL) value.""" def __init__(self, maxsize, ttl, timer=time.time, missing=None, getsizeof=None): - if getsizeof is not None: - Cache.__init__(self, maxsize, missing, Link.getsize) - self.getsizeof = getsizeof - else: - Cache.__init__(self, maxsize, missing) - self.__timer = NestedTimer(timer) - self.__root = root = Link() + Cache.__init__(self, maxsize, missing, getsizeof) + root = self.__root = _Link() root.ttl_prev = root.ttl_next = root root.lru_prev = root.lru_next = root + self.__timer = _NestedTimer(timer) self.__ttl = ttl def __repr__(self, cache_getitem=Cache.__getitem__): @@ -97,18 +95,19 @@ class TTLCache(Cache): def __setitem__(self, key, value, cache_contains=Cache.__contains__, cache_getitem=Cache.__getitem__, - cache_setitem=Cache.__setitem__): + cache_setitem=Cache.__setitem__, + cache_getsizeof=Cache.getsizeof): with self.__timer as time: self.expire(time) 
if cache_contains(self, key): oldlink = cache_getitem(self, key) else: oldlink = None - link = Link() + link = _Link() link.key = key link.value = value link.expire = time + self.__ttl - link.size = self.getsizeof(value) + link.size = cache_getsizeof(self, value) cache_setitem(self, key, link) if oldlink: oldlink.unlink() @@ -162,6 +161,27 @@ class TTLCache(Cache): head = head.ttl_next return cache_len(self) - expired + @property + def currsize(self): + root = self.__root + head = root.ttl_next + expired = 0 + with self.__timer as time: + while head is not root and head.expire < time: + expired += head.size + head = head.ttl_next + return super(TTLCache, self).currsize - expired + + @property + def timer(self): + """The timer function used by the cache.""" + return self.__timer + + @property + def ttl(self): + """The time-to-live value of the cache's items.""" + return self.__ttl + def expire(self, time=None): """Remove expired items from the cache.""" if time is None: @@ -175,6 +195,13 @@ class TTLCache(Cache): head.unlink() head = next + def getsizeof(self, value): + """Return the size of a cache element's value.""" + if isinstance(value, _Link): + return value.size + else: + return Cache.getsizeof(self, value) + def popitem(self): """Remove and return the `(key, value)` pair least recently used that has not already expired. 
@@ -191,27 +218,6 @@ class TTLCache(Cache): link.unlink() return (key, link.value) - @property - def currsize(self): - root = self.__root - head = root.ttl_next - expired = 0 - with self.__timer as time: - while head is not root and head.expire < time: - expired += head.size - head = head.ttl_next - return super(TTLCache, self).currsize - expired - - @property - def timer(self): - """The timer function used by the cache.""" - return self.__timer - - @property - def ttl(self): - """The time-to-live value of the cache's items.""" - return self.__ttl - # mixin methods def __nested(method): diff --git a/tests/__init__.py b/tests/__init__.py index 2026b27..8ffb294 100644 --- a/tests/__init__.py +++ b/tests/__init__.py @@ -202,6 +202,30 @@ class CacheTestMixin(object): self.assertEqual(3, cache.currsize) self.assertEqual(3, cache[3]) + def test_cache_pickle(self): + import pickle + import sys + + cache = self.cache(maxsize=2) + cache.update({1: 1, 2: 2}) + if sys.version_info < (3, 0): + cache = pickle.loads(pickle.dumps(cache, -1)) + else: + cache = pickle.loads(pickle.dumps(cache)) + self.assertEqual(2, len(cache)) + self.assertEqual(1, cache[1]) + self.assertEqual(2, cache[2]) + + cache[3] = 3 + self.assertEqual(2, len(cache)) + self.assertEqual(3, cache[3]) + self.assertTrue(1 in cache or 2 in cache) + + cache[4] = 4 + self.assertEqual(2, len(cache)) + self.assertEqual(4, cache[4]) + self.assertTrue(1 in cache or 2 in cache or 3 in cache) + class DecoratorTestMixin(object): diff --git a/tests/test_rr.py b/tests/test_rr.py index b6d2f2c..41c9c6c 100644 --- a/tests/test_rr.py +++ b/tests/test_rr.py @@ -6,10 +6,14 @@ from cachetools import RRCache, rr_cache from . import CacheTestMixin, DecoratorTestMixin +# random.choice cannot be pickled... 
+def choice(seq): + return random.choice(seq) + + class RRCacheTest(unittest.TestCase, CacheTestMixin, DecoratorTestMixin): - def cache(self, maxsize, choice=random.choice, missing=None, - getsizeof=None): + def cache(self, maxsize, choice=choice, missing=None, getsizeof=None): return RRCache(maxsize, choice=choice, missing=missing, getsizeof=getsizeof) -- cgit v1.2.3 From 0ca640f2c88f5a6767bb09da8cab5983665b6d98 Mon Sep 17 00:00:00 2001 From: Thomas Kemmer Date: Thu, 18 Jun 2015 19:24:52 +0200 Subject: Prepare v1.0.2. --- CHANGES.rst | 9 +++++++++ cachetools/__init__.py | 2 +- 2 files changed, 10 insertions(+), 1 deletion(-) diff --git a/CHANGES.rst b/CHANGES.rst index 915a939..759e764 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -1,3 +1,12 @@ +1.0.2 2015-06-18 +---------------- + +- Allow simple cache instances to be pickled. + +- Refactor ``Cache.getsizeof`` and ``Cache.missing`` default + implementation. + + 1.0.1 2015-06-06 ---------------- diff --git a/cachetools/__init__.py b/cachetools/__init__.py index b1360a1..c4b4253 100644 --- a/cachetools/__init__.py +++ b/cachetools/__init__.py @@ -15,4 +15,4 @@ __all__ = ( 'lfu_cache', 'lru_cache', 'rr_cache', 'ttl_cache', ) -__version__ = '1.0.1' +__version__ = '1.0.2' -- cgit v1.2.3 From da4ea1a8a829f59caea0279deb9c25d0dabfbcda Mon Sep 17 00:00:00 2001 From: Thomas Kemmer Date: Wed, 24 Jun 2015 05:55:10 +0200 Subject: Fix #42: Clear cache statistics in clear_cache(). 
--- cachetools/func.py | 1 + tests/__init__.py | 42 ----------------------------- tests/test_func.py | 79 ++++++++++++++++++++++++++++++++++++++++++++++++++++++ tests/test_lfu.py | 9 +++---- tests/test_lru.py | 9 +++---- tests/test_rr.py | 9 +++---- tests/test_ttl.py | 9 +++---- 7 files changed, 92 insertions(+), 66 deletions(-) create mode 100644 tests/test_func.py diff --git a/cachetools/func.py b/cachetools/func.py index 4d88a78..78ec7f6 100644 --- a/cachetools/func.py +++ b/cachetools/func.py @@ -74,6 +74,7 @@ def _cachedfunc(cache, typed=False, lock=None): def cache_clear(): with context: + stats[:] = [0, 0] cache.clear() wrapper.cache_info = cache_info diff --git a/tests/__init__.py b/tests/__init__.py index 8ffb294..be66f4b 100644 --- a/tests/__init__.py +++ b/tests/__init__.py @@ -225,45 +225,3 @@ class CacheTestMixin(object): self.assertEqual(2, len(cache)) self.assertEqual(4, cache[4]) self.assertTrue(1 in cache or 2 in cache or 3 in cache) - - -class DecoratorTestMixin(object): - - def decorator(self, maxsize, typed=False, lock=None): - raise NotImplementedError - - def test_decorator(self): - cached = self.decorator(maxsize=2)(lambda n: n) - - self.assertEqual(cached(1), 1) - self.assertEqual(cached.cache_info(), (0, 1, 2, 1)) - self.assertEqual(cached(1), 1) - self.assertEqual(cached.cache_info(), (1, 1, 2, 1)) - self.assertEqual(cached(1.0), 1.0) - self.assertEqual(cached.cache_info(), (2, 1, 2, 1)) - - cached.cache_clear() - self.assertEqual(cached(1), 1) - self.assertEqual(cached.cache_info(), (2, 2, 2, 1)) - - def test_typed_decorator(self): - cached = self.decorator(maxsize=2, typed=True)(lambda n: n) - - self.assertEqual(cached(1), 1) - self.assertEqual(cached.cache_info(), (0, 1, 2, 1)) - self.assertEqual(cached(1), 1) - self.assertEqual(cached.cache_info(), (1, 1, 2, 1)) - self.assertEqual(cached(1.0), 1.0) - self.assertEqual(cached.cache_info(), (1, 2, 2, 2)) - self.assertEqual(cached(1.0), 1.0) - self.assertEqual(cached.cache_info(), (2, 2, 
2, 2)) - - def test_nocache_decorator(self): - cached = self.decorator(maxsize=0)(lambda n: n) - - self.assertEqual(cached(1), 1) - self.assertEqual(cached.cache_info(), (0, 1, 0, 0)) - self.assertEqual(cached(1), 1) - self.assertEqual(cached.cache_info(), (0, 2, 0, 0)) - self.assertEqual(cached(1.0), 1.0) - self.assertEqual(cached.cache_info(), (0, 3, 0, 0)) diff --git a/tests/test_func.py b/tests/test_func.py new file mode 100644 index 0000000..31e9a98 --- /dev/null +++ b/tests/test_func.py @@ -0,0 +1,79 @@ +import unittest + +import cachetools.func + + +class DecoratorTestMixin(object): + + def decorator(self, maxsize, typed=False, lock=None): + raise NotImplementedError + + def test_decorator(self): + cached = self.decorator(maxsize=2)(lambda n: n) + + self.assertEqual(cached.cache_info(), (0, 0, 2, 0)) + self.assertEqual(cached(1), 1) + self.assertEqual(cached.cache_info(), (0, 1, 2, 1)) + self.assertEqual(cached(1), 1) + self.assertEqual(cached.cache_info(), (1, 1, 2, 1)) + self.assertEqual(cached(1.0), 1.0) + self.assertEqual(cached.cache_info(), (2, 1, 2, 1)) + + def test_typed_decorator(self): + cached = self.decorator(maxsize=2, typed=True)(lambda n: n) + + self.assertEqual(cached.cache_info(), (0, 0, 2, 0)) + self.assertEqual(cached(1), 1) + self.assertEqual(cached.cache_info(), (0, 1, 2, 1)) + self.assertEqual(cached(1), 1) + self.assertEqual(cached.cache_info(), (1, 1, 2, 1)) + self.assertEqual(cached(1.0), 1.0) + self.assertEqual(cached.cache_info(), (1, 2, 2, 2)) + self.assertEqual(cached(1.0), 1.0) + self.assertEqual(cached.cache_info(), (2, 2, 2, 2)) + + def test_nosize_decorator(self): + cached = self.decorator(maxsize=0)(lambda n: n) + + self.assertEqual(cached.cache_info(), (0, 0, 0, 0)) + self.assertEqual(cached(1), 1) + self.assertEqual(cached.cache_info(), (0, 1, 0, 0)) + self.assertEqual(cached(1), 1) + self.assertEqual(cached.cache_info(), (0, 2, 0, 0)) + self.assertEqual(cached(1.0), 1.0) + self.assertEqual(cached.cache_info(), (0, 3, 0, 
0)) + + def test_cache_clear(self): + cached = self.decorator(maxsize=2)(lambda n: n) + + self.assertEqual(cached.cache_info(), (0, 0, 2, 0)) + self.assertEqual(cached(1), 1) + self.assertEqual(cached.cache_info(), (0, 1, 2, 1)) + cached.cache_clear() + self.assertEqual(cached.cache_info(), (0, 0, 2, 0)) + self.assertEqual(cached(1), 1) + self.assertEqual(cached.cache_info(), (0, 1, 2, 1)) + + +class LFUDecoratorTest(unittest.TestCase, DecoratorTestMixin): + + def decorator(self, maxsize, typed=False, lock=None): + return cachetools.func.lfu_cache(maxsize, typed=typed, lock=lock) + + +class LRUDecoratorTest(unittest.TestCase, DecoratorTestMixin): + + def decorator(self, maxsize, typed=False, lock=None): + return cachetools.func.lru_cache(maxsize, typed=typed, lock=lock) + + +class RRDecoratorTest(unittest.TestCase, DecoratorTestMixin): + + def decorator(self, maxsize, typed=False, lock=None): + return cachetools.func.rr_cache(maxsize, typed=typed, lock=lock) + + +class TTLDecoratorTest(unittest.TestCase, DecoratorTestMixin): + + def decorator(self, maxsize, typed=False, lock=None): + return cachetools.func.ttl_cache(maxsize, typed=typed, lock=lock) diff --git a/tests/test_lfu.py b/tests/test_lfu.py index 1c4741b..fe0437e 100644 --- a/tests/test_lfu.py +++ b/tests/test_lfu.py @@ -1,18 +1,15 @@ import unittest -from cachetools import LFUCache, lfu_cache +from cachetools import LFUCache -from . import CacheTestMixin, DecoratorTestMixin +from . 
import CacheTestMixin -class LFUCacheTest(unittest.TestCase, CacheTestMixin, DecoratorTestMixin): +class LFUCacheTest(unittest.TestCase, CacheTestMixin): def cache(self, maxsize, missing=None, getsizeof=None): return LFUCache(maxsize, missing=missing, getsizeof=getsizeof) - def decorator(self, maxsize, typed=False, lock=None): - return lfu_cache(maxsize, typed=typed, lock=lock) - def test_lfu(self): cache = self.cache(maxsize=2) diff --git a/tests/test_lru.py b/tests/test_lru.py index 5eea036..a94a517 100644 --- a/tests/test_lru.py +++ b/tests/test_lru.py @@ -1,18 +1,15 @@ import unittest -from cachetools import LRUCache, lru_cache +from cachetools import LRUCache -from . import CacheTestMixin, DecoratorTestMixin +from . import CacheTestMixin -class LRUCacheTest(unittest.TestCase, CacheTestMixin, DecoratorTestMixin): +class LRUCacheTest(unittest.TestCase, CacheTestMixin): def cache(self, maxsize, missing=None, getsizeof=None): return LRUCache(maxsize, missing=missing, getsizeof=getsizeof) - def decorator(self, maxsize, typed=False, lock=None): - return lru_cache(maxsize, typed=typed, lock=lock) - def test_lru(self): cache = self.cache(maxsize=2) diff --git a/tests/test_rr.py b/tests/test_rr.py index 41c9c6c..14a4b07 100644 --- a/tests/test_rr.py +++ b/tests/test_rr.py @@ -1,9 +1,9 @@ import random import unittest -from cachetools import RRCache, rr_cache +from cachetools import RRCache -from . import CacheTestMixin, DecoratorTestMixin +from . import CacheTestMixin # random.choice cannot be pickled... 
@@ -11,15 +11,12 @@ def choice(seq): return random.choice(seq) -class RRCacheTest(unittest.TestCase, CacheTestMixin, DecoratorTestMixin): +class RRCacheTest(unittest.TestCase, CacheTestMixin): def cache(self, maxsize, choice=choice, missing=None, getsizeof=None): return RRCache(maxsize, choice=choice, missing=missing, getsizeof=getsizeof) - def decorator(self, maxsize, choice=random.choice, typed=False, lock=None): - return rr_cache(maxsize, choice=choice, typed=typed, lock=lock) - def test_choice(self): cache = self.cache(maxsize=2, choice=min) self.assertEqual(min, cache.choice) diff --git a/tests/test_ttl.py b/tests/test_ttl.py index c6253c2..005ab31 100644 --- a/tests/test_ttl.py +++ b/tests/test_ttl.py @@ -1,8 +1,8 @@ import unittest -from cachetools import TTLCache, ttl_cache +from cachetools import TTLCache -from . import CacheTestMixin, DecoratorTestMixin +from . import CacheTestMixin class Timer: @@ -19,15 +19,12 @@ class Timer: self.time += 1 -class TTLCacheTest(unittest.TestCase, CacheTestMixin, DecoratorTestMixin): +class TTLCacheTest(unittest.TestCase, CacheTestMixin): def cache(self, maxsize, ttl=0, missing=None, getsizeof=None): return TTLCache(maxsize, ttl, timer=Timer(), missing=missing, getsizeof=getsizeof) - def decorator(self, maxsize, ttl=0, typed=False, lock=None): - return ttl_cache(maxsize, ttl, timer=Timer(), typed=typed, lock=lock) - def test_lru(self): cache = self.cache(maxsize=2) -- cgit v1.2.3 From a5712ec367d09d80048bc3b0fb4e363cc99d58b7 Mon Sep 17 00:00:00 2001 From: Thomas Kemmer Date: Fri, 26 Jun 2015 07:58:07 +0200 Subject: Fix #44: Add Python "nightly". --- .travis.yml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/.travis.yml b/.travis.yml index 81a8f8a..c717378 100644 --- a/.travis.yml +++ b/.travis.yml @@ -4,9 +4,10 @@ python: - 3.2 - 3.3 - 3.4 +- nightly install: - pip install . 
coverage coveralls script: -- python setup.py nosetests +- nosetests after_success: - coveralls -- cgit v1.2.3 From 37024a77d122665e47593d6159503e9189024604 Mon Sep 17 00:00:00 2001 From: Thomas Kemmer Date: Fri, 26 Jun 2015 08:06:17 +0200 Subject: Prepare v1.0.3. --- CHANGES.rst | 6 ++++++ cachetools/__init__.py | 2 +- 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/CHANGES.rst b/CHANGES.rst index 759e764..ebac2ca 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -1,3 +1,9 @@ +1.0.3 2015-06-26 +---------------- + +- Clear cache statistics when calling ``clear_cache()``. + + 1.0.2 2015-06-18 ---------------- diff --git a/cachetools/__init__.py b/cachetools/__init__.py index c4b4253..fc2ac0e 100644 --- a/cachetools/__init__.py +++ b/cachetools/__init__.py @@ -15,4 +15,4 @@ __all__ = ( 'lfu_cache', 'lru_cache', 'rr_cache', 'ttl_cache', ) -__version__ = '1.0.2' +__version__ = '1.0.3' -- cgit v1.2.3 From 804be3511e673ca64eb32ccea941b30c1f5823f1 Mon Sep 17 00:00:00 2001 From: Thomas Kemmer Date: Tue, 23 Jun 2015 06:50:50 +0200 Subject: Fix #34: Add generic @cache decorator. 
--- cachetools/__init__.py | 8 ++-- cachetools/decorators.py | 86 ++++++++++++++++++++++++++++++++++ cachetools/func.py | 119 +++++++++++++++++++++++++++-------------------- cachetools/method.py | 42 ----------------- tests/__init__.py | 104 +++++++++++++++++++++++++++++++++++++++++ tests/test_func.py | 85 +++++++++++++++++++++++++-------- tests/test_wrapper.py | 17 +++++++ 7 files changed, 346 insertions(+), 115 deletions(-) create mode 100644 cachetools/decorators.py delete mode 100644 cachetools/method.py create mode 100644 tests/test_wrapper.py diff --git a/cachetools/__init__.py b/cachetools/__init__.py index fc2ac0e..f618ace 100644 --- a/cachetools/__init__.py +++ b/cachetools/__init__.py @@ -1,17 +1,17 @@ """Extensible memoizing collections and decorators""" from .cache import Cache +from .decorators import cache, cachedmethod, cachekey from .func import lfu_cache, lru_cache, rr_cache, ttl_cache from .lfu import LFUCache from .lru import LRUCache -from .method import cachedmethod from .rr import RRCache from .ttl import TTLCache __all__ = ( - 'Cache', - 'cachedmethod', - 'LFUCache', 'LRUCache', 'RRCache', 'TTLCache', + 'Cache', 'LFUCache', 'LRUCache', 'RRCache', 'TTLCache', + 'cache', 'cachedmethod', 'cachekey', + # make cachetools.func.* available for backwards compatibility 'lfu_cache', 'lru_cache', 'rr_cache', 'ttl_cache', ) diff --git a/cachetools/decorators.py b/cachetools/decorators.py new file mode 100644 index 0000000..f928bfa --- /dev/null +++ b/cachetools/decorators.py @@ -0,0 +1,86 @@ +import functools + + +def cachekey(*args, **kwargs): + return (args, tuple(sorted(kwargs.items()))) + + +def _typedkey(method, *args, **kwargs): + key = cachekey(method, *args, **kwargs) + key += tuple(type(v) for v in args) + key += tuple(type(v) for _, v in sorted(kwargs.items())) + return key + + +def cache(cache, key=cachekey, lock=None): + """Decorator to wrap a function with a memoizing callable that saves + results in a cache. 
+ + """ + def decorator(func): + if cache is None: + def wrapper(*args, **kwargs): + return func(*args, **kwargs) + elif lock is None: + def wrapper(*args, **kwargs): + k = key(*args, **kwargs) + try: + return cache[k] + except KeyError: + pass # key not found + v = func(*args, **kwargs) + try: + cache[k] = v + except ValueError: + pass # value too large + return v + else: + def wrapper(*args, **kwargs): + k = key(*args, **kwargs) + try: + with lock: + return cache[k] + except KeyError: + pass # key not found + v = func(*args, **kwargs) + try: + with lock: + cache[k] = v + except ValueError: + pass # value too large + return v + functools.update_wrapper(wrapper, func) + if not hasattr(wrapper, '__wrapped__'): + wrapper.__wrapped__ = func # Python < 3.2 + return wrapper + return decorator + + +def cachedmethod(cache, typed=False): + """Decorator to wrap a class or instance method with a memoizing + callable that saves results in a (possibly shared) cache. + + """ + key = _typedkey if typed else cachekey + + def decorator(method): + def wrapper(self, *args, **kwargs): + c = cache(self) + if c is None: + return method(self, *args, **kwargs) + k = key(method, *args, **kwargs) + try: + return c[k] + except KeyError: + pass # key not found + v = method(self, *args, **kwargs) + try: + c[k] = v + except ValueError: + pass # value too large + return v + + wrapper.cache = cache + return functools.update_wrapper(wrapper, method) + + return decorator diff --git a/cachetools/func.py b/cachetools/func.py index 78ec7f6..c761ab3 100644 --- a/cachetools/func.py +++ b/cachetools/func.py @@ -1,121 +1,140 @@ +"""`functools.lru_cache` compatible memoizing function decorators""" + import collections import functools import random import time - -from .lfu import LFUCache -from .lru import LRUCache -from .rr import RRCache -from .ttl import TTLCache +import warnings try: from threading import RLock except ImportError: from dummy_threading import RLock +from .decorators import cachekey 
-_CacheInfo = collections.namedtuple('CacheInfo', [ - 'hits', 'misses', 'maxsize', 'currsize' -]) +__all__ = ('lfu_cache', 'lru_cache', 'rr_cache', 'ttl_cache') -class _NullContext: +class _NLock: def __enter__(self): pass - def __exit__(self, exc_type, exc_val, exc_tb): + def __exit__(self, *exc): pass +_CacheInfo = collections.namedtuple('CacheInfo', [ + 'hits', 'misses', 'maxsize', 'currsize' +]) -_nullcontext = _NullContext() +_marker = object() -def _makekey_untyped(args, kwargs): - return (args, tuple(sorted(kwargs.items()))) +def _deprecated(message, level=2): + warnings.warn('%s is deprecated' % message, DeprecationWarning, level) -def _makekey_typed(args, kwargs): - key = _makekey_untyped(args, kwargs) +def _typedkey(*args, **kwargs): + key = cachekey(*args, **kwargs) key += tuple(type(v) for v in args) key += tuple(type(v) for _, v in sorted(kwargs.items())) return key -def _cachedfunc(cache, typed=False, lock=None): - makekey = _makekey_typed if typed else _makekey_untyped - context = lock() if lock else _nullcontext - +def _cache(cache, typed=False, context=_marker): def decorator(func): + key = _typedkey if typed else cachekey + if context is _marker: + lock = RLock() + elif context is None: + lock = _NLock() + else: + lock = context() stats = [0, 0] - def wrapper(*args, **kwargs): - key = makekey(args, kwargs) - with context: - try: - result = cache[key] - stats[0] += 1 - return result - except KeyError: - stats[1] += 1 - result = func(*args, **kwargs) - with context: - try: - cache[key] = result - except ValueError: - pass # value too large - return result - def cache_info(): - with context: + with lock: hits, misses = stats maxsize = cache.maxsize currsize = cache.currsize return _CacheInfo(hits, misses, maxsize, currsize) def cache_clear(): - with context: - stats[:] = [0, 0] - cache.clear() + with lock: + try: + cache.clear() + finally: + stats[:] = [0, 0] + def wrapper(*args, **kwargs): + k = key(*args, **kwargs) + with lock: + try: + v = 
cache[k] + stats[0] += 1 + return v + except KeyError: + stats[1] += 1 + v = func(*args, **kwargs) + try: + with lock: + cache[k] = v + except ValueError: + pass # value too large + return v + functools.update_wrapper(wrapper, func) + if not hasattr(wrapper, '__wrapped__'): + wrapper.__wrapped__ = func # Python < 3.2 wrapper.cache_info = cache_info wrapper.cache_clear = cache_clear - return functools.update_wrapper(wrapper, func) - + return wrapper return decorator -def lfu_cache(maxsize=128, typed=False, getsizeof=None, lock=RLock): +def lfu_cache(maxsize=128, typed=False, getsizeof=None, lock=_marker): """Decorator to wrap a function with a memoizing callable that saves up to `maxsize` results based on a Least Frequently Used (LFU) algorithm. """ - return _cachedfunc(LFUCache(maxsize, getsizeof), typed, lock) + from .lfu import LFUCache + if lock is not _marker: + _deprecated("Passing 'lock' to lfu_cache()", 3) + return _cache(LFUCache(maxsize, getsizeof), typed, lock) -def lru_cache(maxsize=128, typed=False, getsizeof=None, lock=RLock): +def lru_cache(maxsize=128, typed=False, getsizeof=None, lock=_marker): """Decorator to wrap a function with a memoizing callable that saves up to `maxsize` results based on a Least Recently Used (LRU) algorithm. """ - return _cachedfunc(LRUCache(maxsize, getsizeof), typed, lock) + from .lru import LRUCache + if lock is not _marker: + _deprecated("Passing 'lock' to lru_cache()", 3) + return _cache(LRUCache(maxsize, getsizeof), typed, lock) def rr_cache(maxsize=128, choice=random.choice, typed=False, getsizeof=None, - lock=RLock): + lock=_marker): """Decorator to wrap a function with a memoizing callable that saves up to `maxsize` results based on a Random Replacement (RR) algorithm. 
""" - return _cachedfunc(RRCache(maxsize, choice, getsizeof), typed, lock) + from .rr import RRCache + if lock is not _marker: + _deprecated("Passing 'lock' to rr_cache()", 3) + return _cache(RRCache(maxsize, choice, getsizeof), typed, lock) def ttl_cache(maxsize=128, ttl=600, timer=time.time, typed=False, - getsizeof=None, lock=RLock): + getsizeof=None, lock=_marker): """Decorator to wrap a function with a memoizing callable that saves up to `maxsize` results based on a Least Recently Used (LRU) algorithm with a per-item time-to-live (TTL) value. """ - return _cachedfunc(TTLCache(maxsize, ttl, timer, getsizeof), typed, lock) + from .ttl import TTLCache + if lock is not _marker: + _deprecated("Passing 'lock' to ttl_cache()", 3) + return _cache(TTLCache(maxsize, ttl, timer, getsizeof), typed, lock) diff --git a/cachetools/method.py b/cachetools/method.py deleted file mode 100644 index 2d04f44..0000000 --- a/cachetools/method.py +++ /dev/null @@ -1,42 +0,0 @@ -import functools - - -def _makekey_untyped(method, args, kwargs): - return (method, args, tuple(sorted(kwargs.items()))) - - -def _makekey_typed(method, args, kwargs): - key = _makekey_untyped(method, args, kwargs) - key += tuple(type(v) for v in args) - key += tuple(type(v) for _, v in sorted(kwargs.items())) - return key - - -def cachedmethod(cache, typed=False): - """Decorator to wrap a class or instance method with a memoizing - callable that saves results in a (possibly shared) cache. 
- - """ - makekey = _makekey_typed if typed else _makekey_untyped - - def decorator(method): - def wrapper(self, *args, **kwargs): - mapping = cache(self) - if mapping is None: - return method(self, *args, **kwargs) - key = makekey(method, args, kwargs) - try: - return mapping[key] - except KeyError: - pass - result = method(self, *args, **kwargs) - try: - mapping[key] = result - except ValueError: - pass # value too large - return result - - wrapper.cache = cache - return functools.update_wrapper(wrapper, method) - - return decorator diff --git a/tests/__init__.py b/tests/__init__.py index be66f4b..4036855 100644 --- a/tests/__init__.py +++ b/tests/__init__.py @@ -1,3 +1,6 @@ +import cachetools + + class CacheTestMixin(object): def cache(self, maxsize, missing=None, getsizeof=None): @@ -225,3 +228,104 @@ class CacheTestMixin(object): self.assertEqual(2, len(cache)) self.assertEqual(4, cache[4]) self.assertTrue(1 in cache or 2 in cache or 3 in cache) + + +class DecoratorTestMixin(object): + + def cache(self, minsize): + raise NotImplementedError + + def func(self, *args, **kwargs): + if hasattr(self, 'count'): + self.count += 1 + else: + self.count = 0 + return self.count + + def test_decorator(self): + cache = self.cache(2) + wrapper = cachetools.cache(cache)(self.func) + + self.assertEqual(len(cache), 0) + self.assertEqual(wrapper.__wrapped__, self.func) + + self.assertEqual(wrapper(0), 0) + self.assertEqual(len(cache), 1) + self.assertIn(cachetools.cachekey(0), cache) + self.assertNotIn(cachetools.cachekey(1), cache) + self.assertNotIn(cachetools.cachekey(1.0), cache) + + self.assertEqual(wrapper(1), 1) + self.assertEqual(len(cache), 2) + self.assertIn(cachetools.cachekey(0), cache) + self.assertIn(cachetools.cachekey(1), cache) + self.assertIn(cachetools.cachekey(1.0), cache) + + self.assertEqual(wrapper(1), 1) + self.assertEqual(len(cache), 2) + + self.assertEqual(wrapper(1.0), 1) + self.assertEqual(len(cache), 2) + + self.assertEqual(wrapper(1.0), 1) + 
self.assertEqual(len(cache), 2) + + def test_decorator_typed(self): + cache = self.cache(3) + + def typedkey(*args, **kwargs): + key = cachetools.cachekey(*args, **kwargs) + key += tuple(type(v) for v in args) + key += tuple(type(v) for _, v in sorted(kwargs.items())) + return key + wrapper = cachetools.cache(cache, key=typedkey)(self.func) + + self.assertEqual(len(cache), 0) + self.assertEqual(wrapper.__wrapped__, self.func) + + self.assertEqual(wrapper(0), 0) + self.assertEqual(len(cache), 1) + self.assertIn(typedkey(0), cache) + self.assertNotIn(typedkey(1), cache) + self.assertNotIn(typedkey(1.0), cache) + + self.assertEqual(wrapper(1), 1) + self.assertEqual(len(cache), 2) + self.assertIn(typedkey(0), cache) + self.assertIn(typedkey(1), cache) + self.assertNotIn(typedkey(1.0), cache) + + self.assertEqual(wrapper(1), 1) + self.assertEqual(len(cache), 2) + + self.assertEqual(wrapper(1.0), 2) + self.assertEqual(len(cache), 3) + self.assertIn(typedkey(0), cache) + self.assertIn(typedkey(1), cache) + self.assertIn(typedkey(1.0), cache) + + self.assertEqual(wrapper(1.0), 2) + self.assertEqual(len(cache), 3) + + def test_decorator_lock(self): + class Lock(object): + + count = 0 + + def __enter__(self): + Lock.count += 1 + + def __exit__(self, *exc): + pass + + cache = self.cache(2) + wrapper = cachetools.cache(cache, lock=Lock())(self.func) + + self.assertEqual(len(cache), 0) + self.assertEqual(wrapper.__wrapped__, self.func) + self.assertEqual(wrapper(0), 0) + self.assertEqual(Lock.count, 2) + self.assertEqual(wrapper(1), 1) + self.assertEqual(Lock.count, 4) + self.assertEqual(wrapper(1), 1) + self.assertEqual(Lock.count, 5) diff --git a/tests/test_func.py b/tests/test_func.py index 31e9a98..236a5d7 100644 --- a/tests/test_func.py +++ b/tests/test_func.py @@ -1,4 +1,5 @@ import unittest +import warnings import cachetools.func @@ -19,20 +20,18 @@ class DecoratorTestMixin(object): self.assertEqual(cached(1.0), 1.0) self.assertEqual(cached.cache_info(), (2, 1, 2, 1)) - 
def test_typed_decorator(self): - cached = self.decorator(maxsize=2, typed=True)(lambda n: n) + def test_decorator_clear(self): + cached = self.decorator(maxsize=2)(lambda n: n) self.assertEqual(cached.cache_info(), (0, 0, 2, 0)) self.assertEqual(cached(1), 1) self.assertEqual(cached.cache_info(), (0, 1, 2, 1)) + cached.cache_clear() + self.assertEqual(cached.cache_info(), (0, 0, 2, 0)) self.assertEqual(cached(1), 1) - self.assertEqual(cached.cache_info(), (1, 1, 2, 1)) - self.assertEqual(cached(1.0), 1.0) - self.assertEqual(cached.cache_info(), (1, 2, 2, 2)) - self.assertEqual(cached(1.0), 1.0) - self.assertEqual(cached.cache_info(), (2, 2, 2, 2)) + self.assertEqual(cached.cache_info(), (0, 1, 2, 1)) - def test_nosize_decorator(self): + def test_decorator_nosize(self): cached = self.decorator(maxsize=0)(lambda n: n) self.assertEqual(cached.cache_info(), (0, 0, 0, 0)) @@ -43,37 +42,85 @@ class DecoratorTestMixin(object): self.assertEqual(cached(1.0), 1.0) self.assertEqual(cached.cache_info(), (0, 3, 0, 0)) - def test_cache_clear(self): - cached = self.decorator(maxsize=2)(lambda n: n) + def test_decorator_typed(self): + cached = self.decorator(maxsize=2, typed=True)(lambda n: n) self.assertEqual(cached.cache_info(), (0, 0, 2, 0)) self.assertEqual(cached(1), 1) self.assertEqual(cached.cache_info(), (0, 1, 2, 1)) - cached.cache_clear() + self.assertEqual(cached(1), 1) + self.assertEqual(cached.cache_info(), (1, 1, 2, 1)) + self.assertEqual(cached(1.0), 1.0) + self.assertEqual(cached.cache_info(), (1, 2, 2, 2)) + self.assertEqual(cached(1.0), 1.0) + self.assertEqual(cached.cache_info(), (2, 2, 2, 2)) + + def test_decorator_lock(self): + class Lock(object): + count = 0 + + def __enter__(self): + Lock.count += 1 + + def __exit__(self, *exc): + pass + + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter('always') + cached = self.decorator(maxsize=2, lock=Lock)(lambda n: n) + self.assertEqual(len(w), 1) + self.assertIs(w[0].category, 
DeprecationWarning) + + self.assertEqual(cached.cache_info(), (0, 0, 2, 0)) + self.assertEqual(Lock.count, 1) + self.assertEqual(cached(1), 1) + self.assertEqual(Lock.count, 3) + self.assertEqual(cached.cache_info(), (0, 1, 2, 1)) + self.assertEqual(Lock.count, 4) + self.assertEqual(cached(1), 1) + self.assertEqual(Lock.count, 5) + self.assertEqual(cached.cache_info(), (1, 1, 2, 1)) + self.assertEqual(Lock.count, 6) + self.assertEqual(cached(1.0), 1.0) + self.assertEqual(Lock.count, 7) + self.assertEqual(cached.cache_info(), (2, 1, 2, 1)) + self.assertEqual(Lock.count, 8) + + def test_decorator_nolock(self): + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter('always') + cached = self.decorator(maxsize=2, lock=None)(lambda n: n) + self.assertEqual(len(w), 1) + self.assertIs(w[0].category, DeprecationWarning) + self.assertEqual(cached.cache_info(), (0, 0, 2, 0)) self.assertEqual(cached(1), 1) self.assertEqual(cached.cache_info(), (0, 1, 2, 1)) + self.assertEqual(cached(1), 1) + self.assertEqual(cached.cache_info(), (1, 1, 2, 1)) + self.assertEqual(cached(1.0), 1.0) + self.assertEqual(cached.cache_info(), (2, 1, 2, 1)) class LFUDecoratorTest(unittest.TestCase, DecoratorTestMixin): - def decorator(self, maxsize, typed=False, lock=None): - return cachetools.func.lfu_cache(maxsize, typed=typed, lock=lock) + def decorator(self, maxsize, **kwargs): + return cachetools.func.lfu_cache(maxsize, **kwargs) class LRUDecoratorTest(unittest.TestCase, DecoratorTestMixin): - def decorator(self, maxsize, typed=False, lock=None): - return cachetools.func.lru_cache(maxsize, typed=typed, lock=lock) + def decorator(self, maxsize, **kwargs): + return cachetools.func.lru_cache(maxsize, **kwargs) class RRDecoratorTest(unittest.TestCase, DecoratorTestMixin): - def decorator(self, maxsize, typed=False, lock=None): - return cachetools.func.rr_cache(maxsize, typed=typed, lock=lock) + def decorator(self, maxsize, **kwargs): + return cachetools.func.rr_cache(maxsize, 
**kwargs) class TTLDecoratorTest(unittest.TestCase, DecoratorTestMixin): - def decorator(self, maxsize, typed=False, lock=None): - return cachetools.func.ttl_cache(maxsize, typed=typed, lock=lock) + def decorator(self, maxsize, **kwargs): + return cachetools.func.ttl_cache(maxsize, **kwargs) diff --git a/tests/test_wrapper.py b/tests/test_wrapper.py new file mode 100644 index 0000000..983bb16 --- /dev/null +++ b/tests/test_wrapper.py @@ -0,0 +1,17 @@ +import unittest + +import cachetools + +from . import DecoratorTestMixin + + +class CacheWrapperTest(unittest.TestCase, DecoratorTestMixin): + + def cache(self, minsize): + return cachetools.Cache(maxsize=minsize) + + +class DictWrapperTest(unittest.TestCase, DecoratorTestMixin): + + def cache(self, minsize): + return dict() -- cgit v1.2.3 From 1a4d59ac5fc97b8a22377a3ffccfc9895a24efb0 Mon Sep 17 00:00:00 2001 From: Thomas Kemmer Date: Wed, 26 Aug 2015 18:00:21 +0200 Subject: Fix #43: Save hash value for decorator keys. --- cachetools/__init__.py | 82 +++++++++++++++++++++++++++++++++++-- cachetools/base.py | 109 +++++++++++++++++++++++++++++++++++++++++++++++++ cachetools/cache.py | 109 ------------------------------------------------- cachetools/func.py | 11 +---- cachetools/keys.py | 30 ++++++++++++++ cachetools/lfu.py | 2 +- cachetools/lru.py | 2 +- cachetools/rr.py | 2 +- cachetools/ttl.py | 2 +- tests/__init__.py | 14 +++---- tests/test_keys.py | 45 ++++++++++++++++++++ 11 files changed, 276 insertions(+), 132 deletions(-) create mode 100644 cachetools/base.py delete mode 100644 cachetools/cache.py create mode 100644 cachetools/keys.py create mode 100644 tests/test_keys.py diff --git a/cachetools/__init__.py b/cachetools/__init__.py index f618ace..42f2535 100644 --- a/cachetools/__init__.py +++ b/cachetools/__init__.py @@ -1,8 +1,10 @@ """Extensible memoizing collections and decorators""" -from .cache import Cache -from .decorators import cache, cachedmethod, cachekey +import functools + +from .base import Cache 
from .func import lfu_cache, lru_cache, rr_cache, ttl_cache +from .keys import hashkey, typedkey from .lfu import LFUCache from .lru import LRUCache from .rr import RRCache @@ -10,9 +12,83 @@ from .ttl import TTLCache __all__ = ( 'Cache', 'LFUCache', 'LRUCache', 'RRCache', 'TTLCache', - 'cache', 'cachedmethod', 'cachekey', + 'cache', 'cachedmethod', 'hashkey', 'typedkey', # make cachetools.func.* available for backwards compatibility 'lfu_cache', 'lru_cache', 'rr_cache', 'ttl_cache', ) __version__ = '1.0.3' + + +def cache(cache, key=hashkey, lock=None): + """Decorator to wrap a function with a memoizing callable that saves + results in a cache. + + """ + def decorator(func): + if cache is None: + def wrapper(*args, **kwargs): + return func(*args, **kwargs) + elif lock is None: + def wrapper(*args, **kwargs): + k = key(*args, **kwargs) + try: + return cache[k] + except KeyError: + pass # key not found + v = func(*args, **kwargs) + try: + cache[k] = v + except ValueError: + pass # value too large + return v + else: + def wrapper(*args, **kwargs): + k = key(*args, **kwargs) + try: + with lock: + return cache[k] + except KeyError: + pass # key not found + v = func(*args, **kwargs) + try: + with lock: + cache[k] = v + except ValueError: + pass # value too large + return v + functools.update_wrapper(wrapper, func) + if not hasattr(wrapper, '__wrapped__'): + wrapper.__wrapped__ = func # Python < 3.2 + return wrapper + return decorator + + +def cachedmethod(cache, typed=False): + """Decorator to wrap a class or instance method with a memoizing + callable that saves results in a (possibly shared) cache. 
+ + """ + key = typedkey if typed else hashkey + + def decorator(method): + def wrapper(self, *args, **kwargs): + c = cache(self) + if c is None: + return method(self, *args, **kwargs) + k = key(method, *args, **kwargs) + try: + return c[k] + except KeyError: + pass # key not found + v = method(self, *args, **kwargs) + try: + c[k] = v + except ValueError: + pass # value too large + return v + + wrapper.cache = cache + return functools.update_wrapper(wrapper, method) + + return decorator diff --git a/cachetools/base.py b/cachetools/base.py new file mode 100644 index 0000000..f0f8d5c --- /dev/null +++ b/cachetools/base.py @@ -0,0 +1,109 @@ +import collections + + +class Cache(collections.MutableMapping): + """Mutable mapping to serve as a simple cache or cache base class.""" + + def __init__(self, maxsize, missing=None, getsizeof=None): + self.__data = dict() + self.__currsize = 0 + self.__maxsize = maxsize + if missing: + self.__missing = missing + if getsizeof: + self.__getsizeof = getsizeof + + def __repr__(self): + return '%s(%r, maxsize=%d, currsize=%d)' % ( + self.__class__.__name__, + list(self.items()), + self.__maxsize, + self.__currsize, + ) + + def __getitem__(self, key): + try: + return self.__data[key][0] + except KeyError: + return self.__missing__(key) + + def __setitem__(self, key, value): + data = self.__data + maxsize = self.__maxsize + size = self.getsizeof(value) + if size > maxsize: + raise ValueError('value too large') + if key not in data or data[key][1] < size: + while self.__currsize + size > maxsize: + self.popitem() + if key in data: + diffsize = size - data[key][1] + else: + diffsize = size + data[key] = (value, size) + self.__currsize += diffsize + + def __delitem__(self, key): + _, size = self.__data.pop(key) + self.__currsize -= size + + def __contains__(self, key): + return key in self.__data + + def __missing__(self, key): + self.__setitem__(key, self.__missing(key)) + # return value as stored in data + return self.__data[key][0] + + 
def __iter__(self): + return iter(self.__data) + + def __len__(self): + return len(self.__data) + + @staticmethod + def __getsizeof(value): + return 1 + + @staticmethod + def __missing(key): + raise KeyError(key) + + @property + def maxsize(self): + """The maximum size of the cache.""" + return self.__maxsize + + @property + def currsize(self): + """The current size of the cache.""" + return self.__currsize + + def getsizeof(self, value): + """Return the size of a cache element's value.""" + return self.__getsizeof(value) + + # collections.MutableMapping mixin methods do not handle __missing__ + + def get(self, key, default=None): + if key in self: + return self[key] + else: + return default + + __marker = object() + + def pop(self, key, default=__marker): + if key in self: + value = self[key] + del self[key] + return value + elif default is self.__marker: + raise KeyError(key) + else: + return default + + def setdefault(self, key, default=None): + if key not in self: + self[key] = default + return self[key] diff --git a/cachetools/cache.py b/cachetools/cache.py deleted file mode 100644 index f0f8d5c..0000000 --- a/cachetools/cache.py +++ /dev/null @@ -1,109 +0,0 @@ -import collections - - -class Cache(collections.MutableMapping): - """Mutable mapping to serve as a simple cache or cache base class.""" - - def __init__(self, maxsize, missing=None, getsizeof=None): - self.__data = dict() - self.__currsize = 0 - self.__maxsize = maxsize - if missing: - self.__missing = missing - if getsizeof: - self.__getsizeof = getsizeof - - def __repr__(self): - return '%s(%r, maxsize=%d, currsize=%d)' % ( - self.__class__.__name__, - list(self.items()), - self.__maxsize, - self.__currsize, - ) - - def __getitem__(self, key): - try: - return self.__data[key][0] - except KeyError: - return self.__missing__(key) - - def __setitem__(self, key, value): - data = self.__data - maxsize = self.__maxsize - size = self.getsizeof(value) - if size > maxsize: - raise ValueError('value too 
large') - if key not in data or data[key][1] < size: - while self.__currsize + size > maxsize: - self.popitem() - if key in data: - diffsize = size - data[key][1] - else: - diffsize = size - data[key] = (value, size) - self.__currsize += diffsize - - def __delitem__(self, key): - _, size = self.__data.pop(key) - self.__currsize -= size - - def __contains__(self, key): - return key in self.__data - - def __missing__(self, key): - self.__setitem__(key, self.__missing(key)) - # return value as stored in data - return self.__data[key][0] - - def __iter__(self): - return iter(self.__data) - - def __len__(self): - return len(self.__data) - - @staticmethod - def __getsizeof(value): - return 1 - - @staticmethod - def __missing(key): - raise KeyError(key) - - @property - def maxsize(self): - """The maximum size of the cache.""" - return self.__maxsize - - @property - def currsize(self): - """The current size of the cache.""" - return self.__currsize - - def getsizeof(self, value): - """Return the size of a cache element's value.""" - return self.__getsizeof(value) - - # collections.MutableMapping mixin methods do not handle __missing__ - - def get(self, key, default=None): - if key in self: - return self[key] - else: - return default - - __marker = object() - - def pop(self, key, default=__marker): - if key in self: - value = self[key] - del self[key] - return value - elif default is self.__marker: - raise KeyError(key) - else: - return default - - def setdefault(self, key, default=None): - if key not in self: - self[key] = default - return self[key] diff --git a/cachetools/func.py b/cachetools/func.py index c761ab3..7aa2daa 100644 --- a/cachetools/func.py +++ b/cachetools/func.py @@ -11,7 +11,7 @@ try: except ImportError: from dummy_threading import RLock -from .decorators import cachekey +from .keys import hashkey, typedkey __all__ = ('lfu_cache', 'lru_cache', 'rr_cache', 'ttl_cache') @@ -34,16 +34,9 @@ def _deprecated(message, level=2): warnings.warn('%s is deprecated' % 
message, DeprecationWarning, level) -def _typedkey(*args, **kwargs): - key = cachekey(*args, **kwargs) - key += tuple(type(v) for v in args) - key += tuple(type(v) for _, v in sorted(kwargs.items())) - return key - - def _cache(cache, typed=False, context=_marker): def decorator(func): - key = _typedkey if typed else cachekey + key = typedkey if typed else hashkey if context is _marker: lock = RLock() elif context is None: diff --git a/cachetools/keys.py b/cachetools/keys.py new file mode 100644 index 0000000..c1e9279 --- /dev/null +++ b/cachetools/keys.py @@ -0,0 +1,30 @@ +__all__ = ('hashkey', 'typedkey') + + +class _HashedSequence(tuple): + + # nonempty __slots__ not supported for subtype of 'tuple' + + def __init__(self, iterable): + self.__hash = tuple.__hash__(self) + + def __hash__(self): + return self.__hash + + def __add__(self, other): + return _HashedSequence(tuple.__add__(self, other)) + + def __radd__(self, other): + return _HashedSequence(tuple.__add__(other, self)) + + +def hashkey(*args, **kwargs): + # TODO: profile flattened tuple w/marker object(s) + return _HashedSequence((args, tuple(sorted(kwargs.items())))) + + +def typedkey(*args, **kwargs): + key = hashkey(*args, **kwargs) + key += tuple(type(v) for v in args) + key += tuple(type(v) for _, v in sorted(kwargs.items())) + return key diff --git a/cachetools/lfu.py b/cachetools/lfu.py index d163cba..26a8b1e 100644 --- a/cachetools/lfu.py +++ b/cachetools/lfu.py @@ -1,7 +1,7 @@ import collections import operator -from .cache import Cache +from .base import Cache class LFUCache(Cache): diff --git a/cachetools/lru.py b/cachetools/lru.py index 606d87e..0877707 100644 --- a/cachetools/lru.py +++ b/cachetools/lru.py @@ -1,4 +1,4 @@ -from .cache import Cache +from .base import Cache class _Link(object): diff --git a/cachetools/rr.py b/cachetools/rr.py index 143223b..4b3bcc6 100644 --- a/cachetools/rr.py +++ b/cachetools/rr.py @@ -1,6 +1,6 @@ import random -from .cache import Cache +from .base import 
Cache class RRCache(Cache): diff --git a/cachetools/ttl.py b/cachetools/ttl.py index d9ef173..082c5ec 100644 --- a/cachetools/ttl.py +++ b/cachetools/ttl.py @@ -1,7 +1,7 @@ import functools import time -from .cache import Cache +from .base import Cache class _Link(object): diff --git a/tests/__init__.py b/tests/__init__.py index 4036855..011a19e 100644 --- a/tests/__init__.py +++ b/tests/__init__.py @@ -251,15 +251,15 @@ class DecoratorTestMixin(object): self.assertEqual(wrapper(0), 0) self.assertEqual(len(cache), 1) - self.assertIn(cachetools.cachekey(0), cache) - self.assertNotIn(cachetools.cachekey(1), cache) - self.assertNotIn(cachetools.cachekey(1.0), cache) + self.assertIn(cachetools.hashkey(0), cache) + self.assertNotIn(cachetools.hashkey(1), cache) + self.assertNotIn(cachetools.hashkey(1.0), cache) self.assertEqual(wrapper(1), 1) self.assertEqual(len(cache), 2) - self.assertIn(cachetools.cachekey(0), cache) - self.assertIn(cachetools.cachekey(1), cache) - self.assertIn(cachetools.cachekey(1.0), cache) + self.assertIn(cachetools.hashkey(0), cache) + self.assertIn(cachetools.hashkey(1), cache) + self.assertIn(cachetools.hashkey(1.0), cache) self.assertEqual(wrapper(1), 1) self.assertEqual(len(cache), 2) @@ -274,7 +274,7 @@ class DecoratorTestMixin(object): cache = self.cache(3) def typedkey(*args, **kwargs): - key = cachetools.cachekey(*args, **kwargs) + key = cachetools.hashkey(*args, **kwargs) key += tuple(type(v) for v in args) key += tuple(type(v) for _, v in sorted(kwargs.items())) return key diff --git a/tests/test_keys.py b/tests/test_keys.py new file mode 100644 index 0000000..fe31f5d --- /dev/null +++ b/tests/test_keys.py @@ -0,0 +1,45 @@ +import unittest + +import cachetools + + +class CacheKeysTest(unittest.TestCase): + + def test_hashkey(self, key=cachetools.hashkey): + self.assertEqual(key(), key()) + self.assertEqual(hash(key()), hash(key())) + self.assertEqual(key(1, 2, 3), key(1, 2, 3)) + self.assertEqual(hash(key(1, 2, 3)), hash(key(1, 2, 
3))) + self.assertEqual(key(1, 2, 3, x=0), key(1, 2, 3, x=0)) + self.assertEqual(hash(key(1, 2, 3, x=0)), hash(key(1, 2, 3, x=0))) + self.assertNotEqual(key(1, 2, 3), key(3, 2, 1)) + self.assertNotEqual(key(1, 2, 3), key(1, 2, 3, x=None)) + self.assertNotEqual(key(1, 2, 3, x=0), key(1, 2, 3, x=None)) + self.assertNotEqual(key(1, 2, 3, x=0), key(1, 2, 3, y=0)) + with self.assertRaises(TypeError): + key({}) + # untyped keys compare equal + self.assertEqual(key(1, 2, 3), key(1.0, 2.0, 3.0)) + self.assertEqual(hash(key(1, 2, 3)), hash(key(1.0, 2.0, 3.0))) + + def test_typedkey(self, key=cachetools.typedkey): + self.assertEqual(key(), key()) + self.assertEqual(hash(key()), hash(key())) + self.assertEqual(key(1, 2, 3), key(1, 2, 3)) + self.assertEqual(hash(key(1, 2, 3)), hash(key(1, 2, 3))) + self.assertEqual(key(1, 2, 3, x=0), key(1, 2, 3, x=0)) + self.assertEqual(hash(key(1, 2, 3, x=0)), hash(key(1, 2, 3, x=0))) + self.assertNotEqual(key(1, 2, 3), key(3, 2, 1)) + self.assertNotEqual(key(1, 2, 3), key(1, 2, 3, x=None)) + self.assertNotEqual(key(1, 2, 3, x=0), key(1, 2, 3, x=None)) + self.assertNotEqual(key(1, 2, 3, x=0), key(1, 2, 3, y=0)) + with self.assertRaises(TypeError): + key({}) + # typed keys compare unequal + self.assertNotEqual(key(1, 2, 3), key(1.0, 2.0, 3.0)) + + def test_addkeys(self, key=cachetools.hashkey): + self.assertIsInstance(key(), tuple) + self.assertIsInstance(key(1, 2, 3) + key(4, 5, 6), type(key())) + self.assertIsInstance(key(1, 2, 3) + (4, 5, 6), type(key())) + self.assertIsInstance((1, 2, 3) + key(4, 5, 6), type(key())) -- cgit v1.2.3 From b00910d8f7e10849f11703321a049d9da461b4b0 Mon Sep 17 00:00:00 2001 From: Thomas Kemmer Date: Wed, 26 Aug 2015 18:32:06 +0200 Subject: Add nosetests for Travis 3.5-dev. 
--- .travis.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.travis.yml b/.travis.yml index c717378..2043e54 100644 --- a/.travis.yml +++ b/.travis.yml @@ -4,9 +4,9 @@ python: - 3.2 - 3.3 - 3.4 -- nightly +- 3.5-dev install: -- pip install . coverage coveralls +- pip install . coverage coveralls nose script: - nosetests after_success: -- cgit v1.2.3 From 00f1aef41d96b32d0bac1d8afcfd69f030c046c0 Mon Sep 17 00:00:00 2001 From: Thomas Kemmer Date: Wed, 26 Aug 2015 18:02:48 +0200 Subject: Fix #37: Add key argument to @cachedmethod. --- cachetools/__init__.py | 77 +++++++++++++++++++++++++++++++++++++------------- 1 file changed, 57 insertions(+), 20 deletions(-) diff --git a/cachetools/__init__.py b/cachetools/__init__.py index 42f2535..d044f2d 100644 --- a/cachetools/__init__.py +++ b/cachetools/__init__.py @@ -1,6 +1,7 @@ """Extensible memoizing collections and decorators""" import functools +import warnings from .base import Cache from .func import lfu_cache, lru_cache, rr_cache, ttl_cache @@ -19,6 +20,8 @@ __all__ = ( __version__ = '1.0.3' +_default = [] # evaluates to False + def cache(cache, key=hashkey, lock=None): """Decorator to wrap a function with a memoizing callable that saves @@ -64,31 +67,65 @@ def cache(cache, key=hashkey, lock=None): return decorator -def cachedmethod(cache, typed=False): +def cachedmethod(cache, key=_default, lock=None, typed=_default): """Decorator to wrap a class or instance method with a memoizing callable that saves results in a (possibly shared) cache. 
""" - key = typedkey if typed else hashkey + if key is not _default and not callable(key): + key, typed = _default, key + if typed is not _default: + warnings.warn("Passing 'typed' to cachedmethod() is deprecated, " + "use 'key=typedkey' instead", DeprecationWarning, 2) def decorator(method): - def wrapper(self, *args, **kwargs): - c = cache(self) - if c is None: - return method(self, *args, **kwargs) - k = key(method, *args, **kwargs) - try: - return c[k] - except KeyError: - pass # key not found - v = method(self, *args, **kwargs) - try: - c[k] = v - except ValueError: - pass # value too large - return v - - wrapper.cache = cache - return functools.update_wrapper(wrapper, method) + # pass method to default key function for backwards compatibilty + if key is _default: + makekey = functools.partial(typedkey if typed else hashkey, method) + else: + makekey = key # custom key function always receive method args + if lock is None: + def wrapper(self, *args, **kwargs): + c = cache(self) + if c is None: + return method(self, *args, **kwargs) + k = makekey(self, *args, **kwargs) + try: + return c[k] + except KeyError: + pass # key not found + v = method(self, *args, **kwargs) + try: + c[k] = v + except ValueError: + pass # value too large + return v + else: + def wrapper(self, *args, **kwargs): + c = cache(self) + if c is None: + return method(self, *args, **kwargs) + k = makekey(self, *args, **kwargs) + try: + with lock: + return c[k] + except KeyError: + pass # key not found + v = method(self, *args, **kwargs) + try: + with lock: + c[k] = v + except ValueError: + pass # value too large + return v + functools.update_wrapper(wrapper, method) + if not hasattr(wrapper, '__wrapped__'): + wrapper.__wrapped__ = method # Python < 3.2 + def getter(self): + warnings.warn('%s.cache is deprecated' % method.__name__, + DeprecationWarning, 2) + return cache(self) + wrapper.cache = getter + return wrapper return decorator -- cgit v1.2.3 From b6bc3508609fcdaab3cae50648a35ea44f7b6112 
Mon Sep 17 00:00:00 2001 From: Thomas Kemmer Date: Fri, 28 Aug 2015 16:30:38 +0200 Subject: Code/interface cleanup and documentation. --- README.rst | 12 +- cachetools/__init__.py | 30 +++-- cachetools/func.py | 2 +- cachetools/keys.py | 4 + docs/index.rst | 329 +++++++++++++++++++++++++++++++++++-------------- tests/__init__.py | 6 +- tests/test_method.py | 41 +++++- 7 files changed, 303 insertions(+), 121 deletions(-) diff --git a/README.rst b/README.rst index 284bd30..c7e1115 100644 --- a/README.rst +++ b/README.rst @@ -2,7 +2,7 @@ cachetools ======================================================================== This module provides various memoizing collections and decorators, -including a variant of the Python 3 Standard Library `@lru_cache`_ +including variants of the Python 3 Standard Library `@lru_cache`_ function decorator. .. code-block:: pycon @@ -28,12 +28,12 @@ which item(s) to discard based on a suitable `cache algorithm`_. In general, a cache's size is the total size of its items, and an item's size is a property or function of its value, e.g. the result of ``sys.getsizeof(value)``. For the trivial but common case that each -item counts as ``1``, irrespective of its value, a cache's size is -equal to the number of its items, or ``len(cache)``. +item counts as ``1``, a cache's size is equal to the number of its +items, or ``len(cache)``. -This module provides multiple cache implementations based on different -cache algorithms, as well as decorators for easily memoizing function -and method calls. +The `cachetools` module implements multiple cache classes based on +different caching algorithms, as well as decorators for easily +memoizing function and method calls. 
Installation diff --git a/cachetools/__init__.py b/cachetools/__init__.py index d044f2d..a64d5ee 100644 --- a/cachetools/__init__.py +++ b/cachetools/__init__.py @@ -1,4 +1,4 @@ -"""Extensible memoizing collections and decorators""" +"""Extensible memoizing collections and decorators.""" import functools import warnings @@ -13,7 +13,7 @@ from .ttl import TTLCache __all__ = ( 'Cache', 'LFUCache', 'LRUCache', 'RRCache', 'TTLCache', - 'cache', 'cachedmethod', 'hashkey', 'typedkey', + 'cached', 'cachedmethod', 'hashkey', 'typedkey', # make cachetools.func.* available for backwards compatibility 'lfu_cache', 'lru_cache', 'rr_cache', 'ttl_cache', ) @@ -22,8 +22,16 @@ __version__ = '1.0.3' _default = [] # evaluates to False +if hasattr(functools.update_wrapper(lambda f: f(), lambda: 42), '__wrapped__'): + _update_wrapper = functools.update_wrapper +else: + def _update_wrapper(wrapper, wrapped): + functools.update_wrapper(wrapper, wrapped) + wrapper.__wrapped__ = wrapped + return wrapper + -def cache(cache, key=hashkey, lock=None): +def cached(cache, key=hashkey, lock=None): """Decorator to wrap a function with a memoizing callable that saves results in a cache. @@ -60,16 +68,13 @@ def cache(cache, key=hashkey, lock=None): except ValueError: pass # value too large return v - functools.update_wrapper(wrapper, func) - if not hasattr(wrapper, '__wrapped__'): - wrapper.__wrapped__ = func # Python < 3.2 - return wrapper + return _update_wrapper(wrapper, func) return decorator def cachedmethod(cache, key=_default, lock=None, typed=_default): """Decorator to wrap a class or instance method with a memoizing - callable that saves results in a (possibly shared) cache. + callable that saves results in a cache. 
""" if key is not _default and not callable(key): @@ -107,21 +112,20 @@ def cachedmethod(cache, key=_default, lock=None, typed=_default): return method(self, *args, **kwargs) k = makekey(self, *args, **kwargs) try: - with lock: + with lock(self): return c[k] except KeyError: pass # key not found v = method(self, *args, **kwargs) try: - with lock: + with lock(self): c[k] = v except ValueError: pass # value too large return v - functools.update_wrapper(wrapper, method) - if not hasattr(wrapper, '__wrapped__'): - wrapper.__wrapped__ = method # Python < 3.2 + _update_wrapper(wrapper, method) + # deprecated wrapper attribute def getter(self): warnings.warn('%s.cache is deprecated' % method.__name__, DeprecationWarning, 2) diff --git a/cachetools/func.py b/cachetools/func.py index 7aa2daa..25b415a 100644 --- a/cachetools/func.py +++ b/cachetools/func.py @@ -1,4 +1,4 @@ -"""`functools.lru_cache` compatible memoizing function decorators""" +"""`functools.lru_cache` compatible memoizing function decorators.""" import collections import functools diff --git a/cachetools/keys.py b/cachetools/keys.py index c1e9279..c95bedd 100644 --- a/cachetools/keys.py +++ b/cachetools/keys.py @@ -19,11 +19,15 @@ class _HashedSequence(tuple): def hashkey(*args, **kwargs): + """Return a cache key for the specified hashable arguments.""" + # TODO: profile flattened tuple w/marker object(s) return _HashedSequence((args, tuple(sorted(kwargs.items())))) def typedkey(*args, **kwargs): + """Return a typed cache key for the specified hashable arguments.""" + key = hashkey(*args, **kwargs) key += tuple(type(v) for v in args) key += tuple(type(v) for _, v in sorted(kwargs.items())) diff --git a/docs/index.rst b/docs/index.rst index fe9a459..2b6730d 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -4,25 +4,9 @@ .. 
module:: cachetools This module provides various memoizing collections and decorators, -including a variant of the Python 3 Standard Library `@lru_cache`_ +including variants of the Python 3 Standard Library `@lru_cache`_ function decorator. -.. code-block:: pycon - - >>> from cachetools import LRUCache - >>> cache = LRUCache(maxsize=2) - >>> cache.update([('first', 1), ('second', 2)]) - >>> cache - LRUCache([('second', 2), ('first', 1)], maxsize=2, currsize=2) - >>> cache['third'] = 3 - >>> cache - LRUCache([('second', 2), ('third', 3)], maxsize=2, currsize=2) - >>> cache['second'] - 2 - >>> cache['fourth'] = 4 - >>> cache - LRUCache([('second', 2), ('fourth', 4)], maxsize=2, currsize=2) - For the purpose of this module, a *cache* is a mutable_ mapping_ of a fixed maximum size. When the cache is full, i.e. by adding another item the cache would exceed its maximum size, the cache must choose @@ -30,15 +14,21 @@ which item(s) to discard based on a suitable `cache algorithm`_. In general, a cache's size is the total size of its items, and an item's size is a property or function of its value, e.g. the result of ``sys.getsizeof(value)``. For the trivial but common case that each -item counts as :const:`1`, irrespective of its value, a cache's size -is equal to the number of its items, or ``len(cache)``. +item counts as :const:`1`, a cache's size is equal to the number of +its items, or ``len(cache)``. + +The :mod:`cachetools` module implements multiple cache classes based +on different caching algorithms, as well as decorators for easily +memoizing function and method calls. -This module provides multiple cache implementations based on different -cache algorithms, as well as decorators for easily memoizing function -and method calls. +.. versionchanged:: 1.1 + Moved :func:`functools.lru_cache` compatible decorators to the + :mod:`cachetools.func` module. For backwards compatibility, they + continue to be visible in this module as well. 
-Cache Implementations + +Cache implementations ------------------------------------------------------------------------ This module provides several classes implementing caches using @@ -151,103 +141,126 @@ of one argument used to retrieve the size of an item's value. expired by the current value returned by :attr:`timer`. -Function Decorators +Decorators ------------------------------------------------------------------------ -This module provides several memoizing function decorators similar to -the Python 3 Standard Library :func:`functools.lru_cache` decorator:: +The :mod:`cachetools` module provides decorators for easily caching +function and method calls. This can save time when a function is +often called with the same arguments:: - import cachetools - import urllib.request + from cachetools import cached - @cachetools.lru_cache(maxsize=4) - def get_pep(num): - """Retrieve text of a Python Enhancement Proposal""" - url = 'http://www.python.org/dev/peps/pep-%04d/' % num - with urllib.request.urlopen(url) as s: - return s.read() + @cached(cache={}) + def fib(n): + return n if n < 2 else fib(n - 1) + fib(n - 2) - for n in 8, 290, 308, 320, 8, 218, 320, 279, 289, 320, 9991: - try: - print(n, len(get_pep(n))) - except urllib.error.HTTPError: - print(n, 'Not Found') - print(get_pep.cache_info()) + for i in range(100): + print('fib(%d) = %d' % (i, fib(i))) -In addition to a `maxsize` parameter, all decorators provide the -following optional keyword arguments: +.. decorator:: cached(cache, key=hashkey, lock=None) -- `typed`, if is set to :const:`True`, will cause function arguments - of different types to be cached separately. + Decorator to wrap a function with a memoizing callable that saves + results in a cache. -- `getsizeof` specifies a function of one argument that will be - applied to each cache value to determine its size. The default - value is :const:`None`, which will assign each item an equal size of - :const:`1`. 
+ The `cache` argument specifies a cache object to store previous + function arguments and return values. Note that `cache` need not + be an instance of the cache implementations provided by the + :mod:`cachetools` module. :func:`cached` will work with any + mutable mapping type, for example plain :class:`dict` or + :class:`weakref.WeakValueDictionary`. -- `lock` specifies a function of zero arguments that returns a - `context manager`_ to lock the cache when necessary. If not - specified, :class:`threading.RLock` will be used to synchronize - access from multiple threads. + `key` will be called with the same positional and keyword arguments + as the wrapped function itself, and has to return a suitable cache + key object. Since caches are implemented as dictionaries, the + object returned by `key` must be hashable. The default is to call + :func:`hashkey`. -The wrapped function is instrumented with :func:`cache_info` and -:func:`cache_clear` functions to provide information about cache -performance and clear the cache. See the :func:`functools.lru_cache` -documentation for details. + If `lock` is not :const:`None`, it must specify an object + implementing the `context manager`_ protocol. Any access to the + cache will then be nested in a ``with lock:`` statement. This can + be used for synchronizing thread access by providing a + :class:`threading.RLock` instance, for example. -Like with :func:`functools.lru_cache`, the positional and keyword -arguments to the underlying function must be hashable. Note that -unlike :func:`functools.lru_cache`, setting `maxsize` to :const:`None` -is not supported. + .. note:: -.. decorator:: lfu_cache(maxsize=128, typed=False, getsizeof=None, lock=threading.RLock) + The `lock` context manager is used only to guard access to the + cache object. The underlying wrapped function will be called + outside the :keyword:`with` statement. 
- Decorator that wraps a function with a memoizing callable that - saves up to `maxsize` results based on a Least Frequently Used - (LFU) algorithm. + The original underlying function is accessible through the + :attr:`__wrapped__` attribute of the memoizing wrapper function. + This can be useful for introspection or for bypassing the cache. -.. decorator:: lru_cache(maxsize=128, typed=False, getsizeof=None, lock=threading.RLock) + To perform operations on the cache object directly, for example to + clear the cache during runtime, the cache can simply be assigned to + a variable. When a `lock` object is used, any access to the cache + from outside the function wrapper should also be performed within + an appropriate :keyword:`with` statement:: - Decorator that wraps a function with a memoizing callable that - saves up to `maxsize` results based on a Least Recently Used (LRU) - algorithm. + from threading import RLock + from cachetools import cached, LRUCache -.. decorator:: rr_cache(maxsize=128, choice=random.choice, typed=False, getsizeof=None, lock=threading.RLock) + cache = LRUCache(maxsize=100) + lock = RLock() - Decorator that wraps a function with a memoizing callable that - saves up to `maxsize` results based on a Random Replacement (RR) - algorithm. + @cached(cache, lock=lock) + def foo(n): + return n + 1 # expensive operation here... -.. decorator:: ttl_cache(maxsize=128, ttl=600, timer=time.time, typed=False, getsizeof=None, lock=threading.RLock) + # make sure access to cache is synchronized + with lock: + cache.clear() - Decorator to wrap a function with a memoizing callable that saves - up to `maxsize` results based on a Least Recently Used (LRU) - algorithm with a per-item time-to-live (TTL) value. + It is also possible to use a single shared cache object with + multiple functions. 
However, care must be taken that different + cache keys are generated for each function, even for identical + function arguments:: + from functools import partial + from cachetools import cached, hashkey, LRUCache -Method Decorators ------------------------------------------------------------------------- + cache = LRUCache(maxsize=100) + + @cached(cache, key=partial(hashkey, 'foo')) + def foo(n): + return n + n + + @cached(cache, key=partial(hashkey, 'bar')) + def bar(n): + return n * n + + foo(42) + bar(42) + print(cache) + + .. versionadded:: 1.1 -.. decorator:: cachedmethod(cache, typed=False) +.. decorator:: cachedmethod(cache, key=hashkey, lock=None, typed=False) - `cache` specifies a function of one argument that, when passed - :const:`self`, will return a cache object for the respective - instance or class. If `cache(self)` returns :const:`None`, the - original underlying method is called directly and the result is not - cached. The `cache` function is also available as the wrapped - function's :attr:`cache` attribute. Multiple methods of an object - or class may share the same cache object, but it is the user's - responsibility to handle concurrent cache access in a - multi-threaded environment. + Decorator to wrap a class or instance method with a memoizing + callable that saves results in a (possibly shared) cache. - Note that the objects returned from `cache` are not required to be - instances of the cache implementations provided by this module. - :func:`cachedmethod` should work with any mutable mapping type, be - it plain :class:`dict` or :class:`weakref.WeakValueDictionary`. + The main difference between this and the :func:`cached` function + decorator is that `cache` and `lock` are not passed objects, but + functions. Both will be called with :const:`self` as their sole + argument to retrieve the cache or lock object for the method's + respective instance or class. 
- One advantage of this decorator over the similar function - decorators is that cache properties such as `maxsize` can be set at - runtime:: + .. note:: + + As with :func:`cached`, the context manager obtained by calling + ``lock(self)`` will only guard access to the cache itself. It + is the user's responsibility to handle concurrent calls to the + underlying wrapped method in a multithreaded environment. + + If `key` or the optional `typed` keyword argument are set to + :const:`True`, the :func:`typedkey` function is used for generating + hash keys. This has been deprecated in favor of specifying + ``key=typedkey`` explicitly. + + One advantage of :func:`cachedmethod` over the :func:`cached` + function decorator is that cache properties such as `maxsize` can + be set at runtime:: import operator import urllib.request @@ -269,6 +282,132 @@ Method Decorators peps = CachedPEPs(cachesize=10) print("PEP #1: %s" % peps.get(1)) + .. versionadded:: 1.1 + + The optional `key` and `lock` parameters. + + .. versionchanged:: 1.1 + + The :attr:`__wrapped__` attribute is now set in Python 2, too. + + .. deprecated:: 1.1 + + The `typed` argument. Use ``key=typedkey`` instead. + + .. deprecated:: 1.1 + + The wrapper function's :attr:`cache` attribute. Use the + original function passed as the decorator's `cache` argument to + access the cache object. + + +Key functions +------------------------------------------------------------------------ + +The following functions can be used as key functions with the +:func:`cached` and :func:`cachedmethod` decorators: + +.. autofunction:: hashkey + + Returns a :class:`tuple` instance suitable as a cache key, provided + the positional and keywords arguments are hashable. + + .. versionadded:: 1.1 + +.. autofunction:: typedkey + + This function is similar to :func:`hashkey`, but arguments of + different types will yield distinct cache keys. For example, + ``typedkey(3)`` and ``typedkey(3.0)`` will return different + results. + + .. 
versionadded:: 1.1 + + +:mod:`cachetools.func` --- :func:`functools.lru_cache` compatible decorators +============================================================================ + +.. module:: cachetools.func + +To ease migration from (or to) Python 3's :func:`functools.lru_cache`, +this module provides several memoizing function decorators with a +similar API. All these decorators wrap a function with a memoizing +callable that saves up to the `maxsize` most recent calls, using +different caching strategies. Note that unlike +:func:`functools.lru_cache`, setting `maxsize` to :const:`None` is not +supported. + +The wrapped function is instrumented with :func:`cache_info` and +:func:`cache_clear` functions to provide information about cache +performance and clear the cache. See the :func:`functools.lru_cache` +documentation for details. + +In addition to `maxsize`, all decorators accept the following +optional keyword arguments: + +- `typed`, if is set to :const:`True`, will cause function arguments + of different types to be cached separately. For example, ``f(3)`` + and ``f(3.0)`` will be treated as distinct calls with distinct + results. + +- `getsizeof` specifies a function of one argument that will be + applied to each cache value to determine its size. The default + value is :const:`None`, which will assign each item an equal size of + :const:`1`. This has been deprecated in favor of the new + :func:`cachetools.cached` decorator, which allows passing fully + customized cache objects. + +- `lock` specifies a function of zero arguments that returns a + `context manager`_ to lock the cache when necessary. If not + specified, :class:`threading.RLock` will be used to synchronize + access from multiple threads. The use of `lock` is discouraged, and + the `lock` argument has been deprecated. + +.. versionadded:: 1.1 + + Formerly, the decorators provided by :mod:`cachetools.func` were + part of the :mod:`cachetools` module. + +.. 
decorator:: lfu_cache(maxsize=128, typed=False, getsizeof=None, lock=threading.RLock) + + Decorator that wraps a function with a memoizing callable that + saves up to `maxsize` results based on a Least Frequently Used + (LFU) algorithm. + + .. deprecated:: 1.1 + + The `getsizeof` and `lock` arguments. + +.. decorator:: lru_cache(maxsize=128, typed=False, getsizeof=None, lock=threading.RLock) + + Decorator that wraps a function with a memoizing callable that + saves up to `maxsize` results based on a Least Recently Used (LRU) + algorithm. + + .. deprecated:: 1.1 + + The `getsizeof` and `lock` arguments. + +.. decorator:: rr_cache(maxsize=128, choice=random.choice, typed=False, getsizeof=None, lock=threading.RLock) + + Decorator that wraps a function with a memoizing callable that + saves up to `maxsize` results based on a Random Replacement (RR) + algorithm. + + .. deprecated:: 1.1 + + The `getsizeof` and `lock` arguments. + +.. decorator:: ttl_cache(maxsize=128, ttl=600, timer=time.time, typed=False, getsizeof=None, lock=threading.RLock) + + Decorator to wrap a function with a memoizing callable that saves + up to `maxsize` results based on a Least Recently Used (LRU) + algorithm with a per-item time-to-live (TTL) value. + + .. deprecated:: 1.1 + + The `getsizeof` and `lock` arguments. + .. _@lru_cache: http://docs.python.org/3/library/functools.html#functools.lru_cache .. 
_mutable: http://docs.python.org/dev/glossary.html#term-mutable diff --git a/tests/__init__.py b/tests/__init__.py index 011a19e..9a17c1c 100644 --- a/tests/__init__.py +++ b/tests/__init__.py @@ -244,7 +244,7 @@ class DecoratorTestMixin(object): def test_decorator(self): cache = self.cache(2) - wrapper = cachetools.cache(cache)(self.func) + wrapper = cachetools.cached(cache)(self.func) self.assertEqual(len(cache), 0) self.assertEqual(wrapper.__wrapped__, self.func) @@ -278,7 +278,7 @@ class DecoratorTestMixin(object): key += tuple(type(v) for v in args) key += tuple(type(v) for _, v in sorted(kwargs.items())) return key - wrapper = cachetools.cache(cache, key=typedkey)(self.func) + wrapper = cachetools.cached(cache, key=typedkey)(self.func) self.assertEqual(len(cache), 0) self.assertEqual(wrapper.__wrapped__, self.func) @@ -319,7 +319,7 @@ class DecoratorTestMixin(object): pass cache = self.cache(2) - wrapper = cachetools.cache(cache, lock=Lock())(self.func) + wrapper = cachetools.cached(cache, lock=Lock())(self.func) self.assertEqual(len(cache), 0) self.assertEqual(wrapper.__wrapped__, self.func) diff --git a/tests/test_method.py b/tests/test_method.py index 82337e0..a78ac4b 100644 --- a/tests/test_method.py +++ b/tests/test_method.py @@ -6,8 +6,6 @@ from cachetools import LRUCache, cachedmethod class Cached(object): - count = 0 - def __init__(self, cache, count=0): self.cache = cache self.count = count @@ -25,6 +23,23 @@ class Cached(object): return count +class Locked(object): + + def __init__(self, cache): + self.cache = cache + self.count = 0 + + @cachedmethod(operator.attrgetter('cache'), lock=lambda self: self) + def get(self, value): + return self.count + + def __enter__(self): + self.count += 1 + + def __exit__(self, *exc): + pass + + class CachedMethodTest(unittest.TestCase): def test_dict(self): @@ -107,7 +122,7 @@ class CachedMethodTest(unittest.TestCase): def __add__(self, other): return Int(fractions.Fraction.__add__(self, other)) - cached = 
Cached(weakref.WeakValueDictionary(), Int(0)) + cached = Cached(weakref.WeakValueDictionary(), count=Int(0)) self.assertEqual(cached.cache, cached.get.cache(cached)) self.assertEqual(cached.get(0), 0) @@ -125,3 +140,23 @@ class CachedMethodTest(unittest.TestCase): cached.cache.clear() self.assertEqual(cached.get(1), 5) + + def test_locked_dict(self): + cached = Locked({}) + self.assertEqual(cached.cache, cached.get.cache(cached)) + + self.assertEqual(cached.get(0), 1) + self.assertEqual(cached.get(1), 3) + self.assertEqual(cached.get(1), 3) + self.assertEqual(cached.get(1.0), 3) + self.assertEqual(cached.get(2.0), 7) + + def test_nocache(self): + cached = Locked(None) + self.assertEqual(None, cached.get.cache(cached)) + + self.assertEqual(cached.get(0), 0) + self.assertEqual(cached.get(1), 0) + self.assertEqual(cached.get(1), 0) + self.assertEqual(cached.get(1.0), 0) + self.assertEqual(cached.get(1.0), 0) -- cgit v1.2.3 From 6a4c6f03a755340602bb786bcf87e47402e8dda7 Mon Sep 17 00:00:00 2001 From: Thomas Kemmer Date: Fri, 28 Aug 2015 20:31:47 +0200 Subject: Prepare v1.1.0. --- CHANGES.rst | 21 +++++++++++++++++++++ cachetools/__init__.py | 2 +- 2 files changed, 22 insertions(+), 1 deletion(-) diff --git a/CHANGES.rst b/CHANGES.rst index ebac2ca..cdbd2f7 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -1,3 +1,24 @@ +1.1.0 2015-08-28 +---------------- + +- Add ``@cached`` function decorator. + +- Add ``hashkey`` and ``typedkey`` fuctions. + +- Add `key` and `lock` arguments to ``@cachedmethod``. + +- Set ``__wrapped__`` attributes for Python versions < 3.2. + +- Move ``functools`` compatible decorators to ``cachetools.func``. + +- Deprecate ``@cachedmethod`` `typed` argument. + +- Deprecate `cache` attribute for ``@cachedmethod`` wrappers. + +- Deprecate `getsizeof` and `lock` arguments for `cachetools.func` + decorator. 
+ + 1.0.3 2015-06-26 ---------------- diff --git a/cachetools/__init__.py b/cachetools/__init__.py index a64d5ee..c24a13c 100644 --- a/cachetools/__init__.py +++ b/cachetools/__init__.py @@ -18,7 +18,7 @@ __all__ = ( 'lfu_cache', 'lru_cache', 'rr_cache', 'ttl_cache', ) -__version__ = '1.0.3' +__version__ = '1.1.0' _default = [] # evaluates to False -- cgit v1.2.3 From 58bd8c6937c24346e210b4c92db5dccf4220811a Mon Sep 17 00:00:00 2001 From: Thomas Kemmer Date: Fri, 28 Aug 2015 20:41:41 +0200 Subject: Remove unused decorators.py. --- cachetools/decorators.py | 86 ------------------------------------------------ 1 file changed, 86 deletions(-) delete mode 100644 cachetools/decorators.py diff --git a/cachetools/decorators.py b/cachetools/decorators.py deleted file mode 100644 index f928bfa..0000000 --- a/cachetools/decorators.py +++ /dev/null @@ -1,86 +0,0 @@ -import functools - - -def cachekey(*args, **kwargs): - return (args, tuple(sorted(kwargs.items()))) - - -def _typedkey(method, *args, **kwargs): - key = cachekey(method, *args, **kwargs) - key += tuple(type(v) for v in args) - key += tuple(type(v) for _, v in sorted(kwargs.items())) - return key - - -def cache(cache, key=cachekey, lock=None): - """Decorator to wrap a function with a memoizing callable that saves - results in a cache. 
- - """ - def decorator(func): - if cache is None: - def wrapper(*args, **kwargs): - return func(*args, **kwargs) - elif lock is None: - def wrapper(*args, **kwargs): - k = key(*args, **kwargs) - try: - return cache[k] - except KeyError: - pass # key not found - v = func(*args, **kwargs) - try: - cache[k] = v - except ValueError: - pass # value too large - return v - else: - def wrapper(*args, **kwargs): - k = key(*args, **kwargs) - try: - with lock: - return cache[k] - except KeyError: - pass # key not found - v = func(*args, **kwargs) - try: - with lock: - cache[k] = v - except ValueError: - pass # value too large - return v - functools.update_wrapper(wrapper, func) - if not hasattr(wrapper, '__wrapped__'): - wrapper.__wrapped__ = func # Python < 3.2 - return wrapper - return decorator - - -def cachedmethod(cache, typed=False): - """Decorator to wrap a class or instance method with a memoizing - callable that saves results in a (possibly shared) cache. - - """ - key = _typedkey if typed else cachekey - - def decorator(method): - def wrapper(self, *args, **kwargs): - c = cache(self) - if c is None: - return method(self, *args, **kwargs) - k = key(method, *args, **kwargs) - try: - return c[k] - except KeyError: - pass # key not found - v = method(self, *args, **kwargs) - try: - c[k] = v - except ValueError: - pass # value too large - return v - - wrapper.cache = cache - return functools.update_wrapper(wrapper, method) - - return decorator -- cgit v1.2.3 From 732d792ea2d04e3b994c4995439c7ed87129e0e7 Mon Sep 17 00:00:00 2001 From: Thomas Kemmer Date: Mon, 31 Aug 2015 19:27:59 +0200 Subject: Fix redefinition of test_nocache. 
--- tests/test_method.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/test_method.py b/tests/test_method.py index a78ac4b..dee53f1 100644 --- a/tests/test_method.py +++ b/tests/test_method.py @@ -151,7 +151,7 @@ class CachedMethodTest(unittest.TestCase): self.assertEqual(cached.get(1.0), 3) self.assertEqual(cached.get(2.0), 7) - def test_nocache(self): + def test_locked_nocache(self): cached = Locked(None) self.assertEqual(None, cached.get.cache(cached)) -- cgit v1.2.3 From b666900e0aee4f34baca983898c74d8eaf7b8b22 Mon Sep 17 00:00:00 2001 From: Thomas Kemmer Date: Sun, 6 Sep 2015 20:24:22 +0200 Subject: Fix #49: Add flake8 checks to .travis.yml --- .travis.yml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/.travis.yml b/.travis.yml index 2043e54..b8561a7 100644 --- a/.travis.yml +++ b/.travis.yml @@ -6,8 +6,9 @@ python: - 3.4 - 3.5-dev install: -- pip install . coverage coveralls nose +- pip install . coverage coveralls flake8 flake8-import-order nose script: +- flake8 - nosetests after_success: - coveralls -- cgit v1.2.3 From 82b80716de59ab0815d430a7b29c771eb2b68c35 Mon Sep 17 00:00:00 2001 From: Thomas Kemmer Date: Mon, 31 Aug 2015 19:30:11 +0200 Subject: Fix #46: Improve key functions. 
--- cachetools/keys.py | 28 ++++++++++++++++------------ docs/index.rst | 26 ++++++++++++++++++++++++-- tests/test_keys.py | 4 ++-- 3 files changed, 42 insertions(+), 16 deletions(-) diff --git a/cachetools/keys.py b/cachetools/keys.py index c95bedd..887fb30 100644 --- a/cachetools/keys.py +++ b/cachetools/keys.py @@ -1,28 +1,32 @@ __all__ = ('hashkey', 'typedkey') -class _HashedSequence(tuple): +class _HashedTuple(tuple): - # nonempty __slots__ not supported for subtype of 'tuple' + __hashvalue = None - def __init__(self, iterable): - self.__hash = tuple.__hash__(self) + def __hash__(self, hash=tuple.__hash__): + hashvalue = self.__hashvalue + if hashvalue is None: + self.__hashvalue = hashvalue = hash(self) + return hashvalue - def __hash__(self): - return self.__hash + def __add__(self, other, add=tuple.__add__): + return _HashedTuple(add(self, other)) - def __add__(self, other): - return _HashedSequence(tuple.__add__(self, other)) + def __radd__(self, other, add=tuple.__add__): + return _HashedTuple(add(other, self)) - def __radd__(self, other): - return _HashedSequence(tuple.__add__(other, self)) +_kwmark = (object(),) def hashkey(*args, **kwargs): """Return a cache key for the specified hashable arguments.""" - # TODO: profile flattened tuple w/marker object(s) - return _HashedSequence((args, tuple(sorted(kwargs.items())))) + if kwargs: + return _HashedTuple(args + sum(sorted(kwargs.items()), _kwmark)) + else: + return _HashedTuple(args) def typedkey(*args, **kwargs): diff --git a/docs/index.rst b/docs/index.rst index 2b6730d..a81541b 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -309,8 +309,8 @@ The following functions can be used as key functions with the .. autofunction:: hashkey - Returns a :class:`tuple` instance suitable as a cache key, provided - the positional and keywords arguments are hashable. + This function returns a :class:`tuple` instance suitable as a cache + key, provided the positional and keywords arguments are hashable. .. 
versionadded:: 1.1 @@ -323,6 +323,28 @@ The following functions can be used as key functions with the .. versionadded:: 1.1 +These functions can also be helpful when implementing custom key +functions for handling some non-hashable arguments. For example, +calling the following function with a custom `env` argument will raise +a :class:`TypeError`, since :class:`dict` is not hashable:: + + @cached(LRUCache(maxsize=128) + def foo(x, y, z, env={}): + pass + +However, if `env` always holds only hashable values itself, a custom +key function can be written that handles the `env` keyword argument +specially:: + + def envkey(*args, env={}, **kwargs): + key = hashkey(*args, **kwargs) + key += tuple(env.items()) + return key + +This can then be used in the decorator declaration:: + + @cached(LRUCache(maxsize=128), key=envkey) + :mod:`cachetools.func` --- :func:`functools.lru_cache` compatible decorators ============================================================================ diff --git a/tests/test_keys.py b/tests/test_keys.py index fe31f5d..94184a5 100644 --- a/tests/test_keys.py +++ b/tests/test_keys.py @@ -17,7 +17,7 @@ class CacheKeysTest(unittest.TestCase): self.assertNotEqual(key(1, 2, 3, x=0), key(1, 2, 3, x=None)) self.assertNotEqual(key(1, 2, 3, x=0), key(1, 2, 3, y=0)) with self.assertRaises(TypeError): - key({}) + hash(key({})) # untyped keys compare equal self.assertEqual(key(1, 2, 3), key(1.0, 2.0, 3.0)) self.assertEqual(hash(key(1, 2, 3)), hash(key(1.0, 2.0, 3.0))) @@ -34,7 +34,7 @@ class CacheKeysTest(unittest.TestCase): self.assertNotEqual(key(1, 2, 3, x=0), key(1, 2, 3, x=None)) self.assertNotEqual(key(1, 2, 3, x=0), key(1, 2, 3, y=0)) with self.assertRaises(TypeError): - key({}) + hash(key({})) # typed keys compare unequal self.assertNotEqual(key(1, 2, 3), key(1.0, 2.0, 3.0)) -- cgit v1.2.3 From f5662251fe3506dbbb9cccc627572d08e4edee6f Mon Sep 17 00:00:00 2001 From: Thomas Kemmer Date: Mon, 7 Sep 2015 14:07:14 +0200 Subject: Improve unit test 
coverage. --- tests/__init__.py | 104 ----------------------------------- tests/test_method.py | 33 +++++++++++- tests/test_wrapper.py | 146 +++++++++++++++++++++++++++++++++++++++++++++++++- 3 files changed, 176 insertions(+), 107 deletions(-) diff --git a/tests/__init__.py b/tests/__init__.py index 9a17c1c..be66f4b 100644 --- a/tests/__init__.py +++ b/tests/__init__.py @@ -1,6 +1,3 @@ -import cachetools - - class CacheTestMixin(object): def cache(self, maxsize, missing=None, getsizeof=None): @@ -228,104 +225,3 @@ class CacheTestMixin(object): self.assertEqual(2, len(cache)) self.assertEqual(4, cache[4]) self.assertTrue(1 in cache or 2 in cache or 3 in cache) - - -class DecoratorTestMixin(object): - - def cache(self, minsize): - raise NotImplementedError - - def func(self, *args, **kwargs): - if hasattr(self, 'count'): - self.count += 1 - else: - self.count = 0 - return self.count - - def test_decorator(self): - cache = self.cache(2) - wrapper = cachetools.cached(cache)(self.func) - - self.assertEqual(len(cache), 0) - self.assertEqual(wrapper.__wrapped__, self.func) - - self.assertEqual(wrapper(0), 0) - self.assertEqual(len(cache), 1) - self.assertIn(cachetools.hashkey(0), cache) - self.assertNotIn(cachetools.hashkey(1), cache) - self.assertNotIn(cachetools.hashkey(1.0), cache) - - self.assertEqual(wrapper(1), 1) - self.assertEqual(len(cache), 2) - self.assertIn(cachetools.hashkey(0), cache) - self.assertIn(cachetools.hashkey(1), cache) - self.assertIn(cachetools.hashkey(1.0), cache) - - self.assertEqual(wrapper(1), 1) - self.assertEqual(len(cache), 2) - - self.assertEqual(wrapper(1.0), 1) - self.assertEqual(len(cache), 2) - - self.assertEqual(wrapper(1.0), 1) - self.assertEqual(len(cache), 2) - - def test_decorator_typed(self): - cache = self.cache(3) - - def typedkey(*args, **kwargs): - key = cachetools.hashkey(*args, **kwargs) - key += tuple(type(v) for v in args) - key += tuple(type(v) for _, v in sorted(kwargs.items())) - return key - wrapper = 
cachetools.cached(cache, key=typedkey)(self.func) - - self.assertEqual(len(cache), 0) - self.assertEqual(wrapper.__wrapped__, self.func) - - self.assertEqual(wrapper(0), 0) - self.assertEqual(len(cache), 1) - self.assertIn(typedkey(0), cache) - self.assertNotIn(typedkey(1), cache) - self.assertNotIn(typedkey(1.0), cache) - - self.assertEqual(wrapper(1), 1) - self.assertEqual(len(cache), 2) - self.assertIn(typedkey(0), cache) - self.assertIn(typedkey(1), cache) - self.assertNotIn(typedkey(1.0), cache) - - self.assertEqual(wrapper(1), 1) - self.assertEqual(len(cache), 2) - - self.assertEqual(wrapper(1.0), 2) - self.assertEqual(len(cache), 3) - self.assertIn(typedkey(0), cache) - self.assertIn(typedkey(1), cache) - self.assertIn(typedkey(1.0), cache) - - self.assertEqual(wrapper(1.0), 2) - self.assertEqual(len(cache), 3) - - def test_decorator_lock(self): - class Lock(object): - - count = 0 - - def __enter__(self): - Lock.count += 1 - - def __exit__(self, *exc): - pass - - cache = self.cache(2) - wrapper = cachetools.cached(cache, lock=Lock())(self.func) - - self.assertEqual(len(cache), 0) - self.assertEqual(wrapper.__wrapped__, self.func) - self.assertEqual(wrapper(0), 0) - self.assertEqual(Lock.count, 2) - self.assertEqual(wrapper(1), 1) - self.assertEqual(Lock.count, 4) - self.assertEqual(wrapper(1), 1) - self.assertEqual(Lock.count, 5) diff --git a/tests/test_method.py b/tests/test_method.py index dee53f1..b0b9916 100644 --- a/tests/test_method.py +++ b/tests/test_method.py @@ -1,7 +1,8 @@ import operator import unittest +import warnings -from cachetools import LRUCache, cachedmethod +from cachetools import LRUCache, cachedmethod, typedkey class Cached(object): @@ -16,7 +17,7 @@ class Cached(object): self.count += 1 return count - @cachedmethod(operator.attrgetter('cache'), typed=True) + @cachedmethod(operator.attrgetter('cache'), key=typedkey) def get_typed(self, value): count = self.count self.count += 1 @@ -160,3 +161,31 @@ class 
CachedMethodTest(unittest.TestCase): self.assertEqual(cached.get(1), 0) self.assertEqual(cached.get(1.0), 0) self.assertEqual(cached.get(1.0), 0) + + def test_locked_nospace(self): + cached = Locked(LRUCache(maxsize=0)) + self.assertEqual(cached.cache, cached.get.cache(cached)) + + self.assertEqual(cached.get(0), 1) + self.assertEqual(cached.get(1), 3) + self.assertEqual(cached.get(1), 5) + self.assertEqual(cached.get(1.0), 7) + self.assertEqual(cached.get(1.0), 9) + + def test_typed_deprecated(self): + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter("always") + cachedmethod(lambda self: None, None)(lambda self: None) + self.assertIs(w[-1].category, DeprecationWarning) + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter("always") + cachedmethod(lambda self: None, False)(lambda self: None) + self.assertIs(w[-1].category, DeprecationWarning) + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter("always") + cachedmethod(lambda self: None, True)(lambda self: None) + self.assertIs(w[-1].category, DeprecationWarning) + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter("always") + cachedmethod(lambda self: None, typed=None)(lambda self: None) + self.assertIs(w[-1].category, DeprecationWarning) diff --git a/tests/test_wrapper.py b/tests/test_wrapper.py index 983bb16..1d03fb2 100644 --- a/tests/test_wrapper.py +++ b/tests/test_wrapper.py @@ -2,7 +2,106 @@ import unittest import cachetools -from . 
import DecoratorTestMixin + +class DecoratorTestMixin(object): + + def cache(self, minsize): + raise NotImplementedError + + def func(self, *args, **kwargs): + if hasattr(self, 'count'): + self.count += 1 + else: + self.count = 0 + return self.count + + def test_decorator(self): + cache = self.cache(2) + wrapper = cachetools.cached(cache)(self.func) + + self.assertEqual(len(cache), 0) + self.assertEqual(wrapper.__wrapped__, self.func) + + self.assertEqual(wrapper(0), 0) + self.assertEqual(len(cache), 1) + self.assertIn(cachetools.hashkey(0), cache) + self.assertNotIn(cachetools.hashkey(1), cache) + self.assertNotIn(cachetools.hashkey(1.0), cache) + + self.assertEqual(wrapper(1), 1) + self.assertEqual(len(cache), 2) + self.assertIn(cachetools.hashkey(0), cache) + self.assertIn(cachetools.hashkey(1), cache) + self.assertIn(cachetools.hashkey(1.0), cache) + + self.assertEqual(wrapper(1), 1) + self.assertEqual(len(cache), 2) + + self.assertEqual(wrapper(1.0), 1) + self.assertEqual(len(cache), 2) + + self.assertEqual(wrapper(1.0), 1) + self.assertEqual(len(cache), 2) + + def test_decorator_typed(self): + cache = self.cache(3) + + def typedkey(*args, **kwargs): + key = cachetools.hashkey(*args, **kwargs) + key += tuple(type(v) for v in args) + key += tuple(type(v) for _, v in sorted(kwargs.items())) + return key + wrapper = cachetools.cached(cache, key=typedkey)(self.func) + + self.assertEqual(len(cache), 0) + self.assertEqual(wrapper.__wrapped__, self.func) + + self.assertEqual(wrapper(0), 0) + self.assertEqual(len(cache), 1) + self.assertIn(typedkey(0), cache) + self.assertNotIn(typedkey(1), cache) + self.assertNotIn(typedkey(1.0), cache) + + self.assertEqual(wrapper(1), 1) + self.assertEqual(len(cache), 2) + self.assertIn(typedkey(0), cache) + self.assertIn(typedkey(1), cache) + self.assertNotIn(typedkey(1.0), cache) + + self.assertEqual(wrapper(1), 1) + self.assertEqual(len(cache), 2) + + self.assertEqual(wrapper(1.0), 2) + self.assertEqual(len(cache), 3) + 
self.assertIn(typedkey(0), cache) + self.assertIn(typedkey(1), cache) + self.assertIn(typedkey(1.0), cache) + + self.assertEqual(wrapper(1.0), 2) + self.assertEqual(len(cache), 3) + + def test_decorator_lock(self): + class Lock(object): + + count = 0 + + def __enter__(self): + Lock.count += 1 + + def __exit__(self, *exc): + pass + + cache = self.cache(2) + wrapper = cachetools.cached(cache, lock=Lock())(self.func) + + self.assertEqual(len(cache), 0) + self.assertEqual(wrapper.__wrapped__, self.func) + self.assertEqual(wrapper(0), 0) + self.assertEqual(Lock.count, 2) + self.assertEqual(wrapper(1), 1) + self.assertEqual(Lock.count, 4) + self.assertEqual(wrapper(1), 1) + self.assertEqual(Lock.count, 5) class CacheWrapperTest(unittest.TestCase, DecoratorTestMixin): @@ -10,8 +109,53 @@ class CacheWrapperTest(unittest.TestCase, DecoratorTestMixin): def cache(self, minsize): return cachetools.Cache(maxsize=minsize) + def test_zero_size_cache_decorator(self): + cache = self.cache(0) + wrapper = cachetools.cached(cache)(self.func) + + self.assertEqual(len(cache), 0) + self.assertEqual(wrapper.__wrapped__, self.func) + + self.assertEqual(wrapper(0), 0) + self.assertEqual(len(cache), 0) + + def test_zero_size_cache_decorator_lock(self): + class Lock(object): + + count = 0 + + def __enter__(self): + Lock.count += 1 + + def __exit__(self, *exc): + pass + + cache = self.cache(0) + wrapper = cachetools.cached(cache, lock=Lock())(self.func) + + self.assertEqual(len(cache), 0) + self.assertEqual(wrapper.__wrapped__, self.func) + + self.assertEqual(wrapper(0), 0) + self.assertEqual(len(cache), 0) + self.assertEqual(Lock.count, 2) + class DictWrapperTest(unittest.TestCase, DecoratorTestMixin): def cache(self, minsize): return dict() + + +class NoneWrapperTest(unittest.TestCase): + + def func(self, *args, **kwargs): + return args + tuple(kwargs.items()) + + def test_decorator(self): + wrapper = cachetools.cached(None)(self.func) + self.assertEqual(wrapper.__wrapped__, self.func) + + 
self.assertEqual(wrapper(0), (0,)) + self.assertEqual(wrapper(1), (1,)) + self.assertEqual(wrapper(1, foo='bar'), (1, ('foo', 'bar'))) -- cgit v1.2.3 From 5c7effc5a096a50b5c5fc167be3aaef051f29362 Mon Sep 17 00:00:00 2001 From: Thomas Kemmer Date: Mon, 7 Sep 2015 20:21:56 +0200 Subject: Improve documentation. --- README.rst | 6 +++--- docs/index.rst | 58 ++++++++++++++++++++++++++++++---------------------------- 2 files changed, 33 insertions(+), 31 deletions(-) diff --git a/README.rst b/README.rst index c7e1115..227e8a3 100644 --- a/README.rst +++ b/README.rst @@ -31,9 +31,9 @@ size is a property or function of its value, e.g. the result of item counts as ``1``, a cache's size is equal to the number of its items, or ``len(cache)``. -The `cachetools` module implements multiple cache classes based on -different caching algorithms, as well as decorators for easily -memoizing function and method calls. +Multiple cache classes based on different caching algorithms are +implemented, and decorators for easily memoizing function and method +calls are provided, too. Installation diff --git a/docs/index.rst b/docs/index.rst index a81541b..ed2694b 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -17,9 +17,9 @@ size is a property or function of its value, e.g. the result of item counts as :const:`1`, a cache's size is equal to the number of its items, or ``len(cache)``. -The :mod:`cachetools` module implements multiple cache classes based -on different caching algorithms, as well as decorators for easily -memoizing function and method calls. +Multiple cache classes based on different caching algorithms are +implemented, and decorators for easily memoizing function and method +calls are provided, too. .. versionchanged:: 1.1 @@ -166,36 +166,36 @@ often called with the same arguments:: function arguments and return values. Note that `cache` need not be an instance of the cache implementations provided by the :mod:`cachetools` module. 
:func:`cached` will work with any - mutable mapping type, for example plain :class:`dict` or + mutable mapping type, including plain :class:`dict` and :class:`weakref.WeakValueDictionary`. - `key` will be called with the same positional and keyword arguments - as the wrapped function itself, and has to return a suitable cache - key object. Since caches are implemented as dictionaries, the - object returned by `key` must be hashable. The default is to call - :func:`hashkey`. + `key` specifies a functions that will be called with the same + positional and keyword arguments as the wrapped function itself, + and which has to return a suitable cache key. Since caches are + mappings, the object returned by `key` must be hashable. The + default is to call :func:`hashkey`. If `lock` is not :const:`None`, it must specify an object implementing the `context manager`_ protocol. Any access to the cache will then be nested in a ``with lock:`` statement. This can - be used for synchronizing thread access by providing a + be used for synchronizing thread access to the cache by providing a :class:`threading.RLock` instance, for example. .. note:: The `lock` context manager is used only to guard access to the cache object. The underlying wrapped function will be called - outside the :keyword:`with` statement. + outside the `with` statement, and must be thread-safe by itself. The original underlying function is accessible through the :attr:`__wrapped__` attribute of the memoizing wrapper function. - This can be useful for introspection or for bypassing the cache. + This can be used for introspection or for bypassing the cache. - To perform operations on the cache object directly, for example to - clear the cache during runtime, the cache can simply be assigned to - a variable. 
When a `lock` object is used, any access to the cache - from outside the function wrapper should also be performed within - an appropriate :keyword:`with` statement:: + To perform operations on the cache object, for example to clear the + cache during runtime, the cache should be assigned to a variable. + When a `lock` object is used, any access to the cache from outside + the function wrapper should also be performed within an appropriate + `with` statement:: from threading import RLock from cachetools import cached, LRUCache @@ -223,11 +223,11 @@ often called with the same arguments:: @cached(cache, key=partial(hashkey, 'foo')) def foo(n): - return n + n + return n + n @cached(cache, key=partial(hashkey, 'bar')) def bar(n): - return n * n + return n * n foo(42) bar(42) @@ -242,9 +242,9 @@ often called with the same arguments:: The main difference between this and the :func:`cached` function decorator is that `cache` and `lock` are not passed objects, but - functions. Both will be called with :const:`self` as their sole - argument to retrieve the cache or lock object for the method's - respective instance or class. + functions. Both will be called with :const:`self` (or :const:`cls` + for class methods) as their sole argument to retrieve the cache or + lock object for the method's respective instance or class. .. note:: @@ -288,7 +288,8 @@ often called with the same arguments:: .. versionchanged:: 1.1 - The :attr:`__wrapped__` attribute is now set in Python 2, too. + The :attr:`__wrapped__` attribute is now set when running Python + 2.7, too. .. deprecated:: 1.1 @@ -325,8 +326,8 @@ The following functions can be used as key functions with the These functions can also be helpful when implementing custom key functions for handling some non-hashable arguments. 
For example, -calling the following function with a custom `env` argument will raise -a :class:`TypeError`, since :class:`dict` is not hashable:: +calling the following function with a dictionary as its `env` argument +will raise a :class:`TypeError`, since :class:`dict` is not hashable:: @cached(LRUCache(maxsize=128) def foo(x, y, z, env={}): @@ -341,7 +342,8 @@ specially:: key += tuple(env.items()) return key -This can then be used in the decorator declaration:: +The :func:`envkey` function can then be used in decorator declarations +like this:: @cached(LRUCache(maxsize=128), key=envkey) @@ -432,7 +434,7 @@ optional keyword arguments: .. _@lru_cache: http://docs.python.org/3/library/functools.html#functools.lru_cache -.. _mutable: http://docs.python.org/dev/glossary.html#term-mutable -.. _mapping: http://docs.python.org/dev/glossary.html#term-mapping .. _cache algorithm: http://en.wikipedia.org/wiki/Cache_algorithms .. _context manager: http://docs.python.org/dev/glossary.html#term-context-manager +.. _mapping: http://docs.python.org/dev/glossary.html#term-mapping +.. _mutable: http://docs.python.org/dev/glossary.html#term-mutable -- cgit v1.2.3 From 068ed522e20449d8324b2c57a26f562839b15d9e Mon Sep 17 00:00:00 2001 From: Thomas Kemmer Date: Mon, 7 Sep 2015 20:47:50 +0200 Subject: Fix #47: Document deprecated @cachedmethod default behavior. --- docs/index.rst | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/docs/index.rst b/docs/index.rst index ed2694b..83a1559 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -282,6 +282,13 @@ often called with the same arguments:: peps = CachedPEPs(cachesize=10) print("PEP #1: %s" % peps.get(1)) + For backwards compatibility, the default key function used by + :func:`cachedmethod` will generate distinct keys for different + methods to ease using a shared cache with multiple methods. This + has been deprecated, and relying on this feature is strongly + discouraged. 
When using a shared cache, distinct key functions + should be used, as with the :func:`cached` decorator. + .. versionadded:: 1.1 The optional `key` and `lock` parameters. @@ -295,6 +302,11 @@ often called with the same arguments:: The `typed` argument. Use ``key=typedkey`` instead. + .. deprecated:: 1.1 + + When using a shared cached for multiple methods, distinct key + function should be used. + .. deprecated:: 1.1 The wrapper function's :attr:`cache` attribute. Use the -- cgit v1.2.3 From 5cdedd518b7c64839119c667b77b15fc4d0070eb Mon Sep 17 00:00:00 2001 From: Thomas Kemmer Date: Mon, 7 Sep 2015 20:53:37 +0200 Subject: Prepare v1.1.1. --- CHANGES.rst | 82 ++++++++++++++++++++++++++++---------------------- cachetools/__init__.py | 2 +- 2 files changed, 47 insertions(+), 37 deletions(-) diff --git a/CHANGES.rst b/CHANGES.rst index cdbd2f7..a72e429 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -1,5 +1,15 @@ -1.1.0 2015-08-28 ----------------- +v1.1.1 (2015-09-07) +------------------- + +- Improve key functions. + +- Improve documentation. + +- Improve unit test coverage. + + +v1.1.0 (2015-08-28) +------------------- - Add ``@cached`` function decorator. @@ -19,14 +29,14 @@ decorator. -1.0.3 2015-06-26 ----------------- +v1.0.3 (2015-06-26) +------------------- - Clear cache statistics when calling ``clear_cache()``. -1.0.2 2015-06-18 ----------------- +v1.0.2 (2015-06-18) +------------------- - Allow simple cache instances to be pickled. @@ -34,8 +44,8 @@ implementation. -1.0.1 2015-06-06 ----------------- +v1.0.1 (2015-06-06) +------------------- - Code cleanup for improved PEP 8 conformance. @@ -45,28 +55,28 @@ - Improve documentation. -1.0.0 2014-12-19 ----------------- +v1.0.0 (2014-12-19) +------------------- - Provide ``RRCache.choice`` property. - Improve documentation. -0.8.2 2014-12-15 ----------------- +v0.8.2 (2014-12-15) +------------------- - Use a ``NestedTimer`` for ``TTLCache``. 
-0.8.1 2014-12-07 ----------------- +v0.8.1 (2014-12-07) +------------------- - Deprecate ``Cache.getsize()``. -0.8.0 2014-12-03 ----------------- +v0.8.0 (2014-12-03) +------------------- - Ignore ``ValueError`` raised on cache insertion in decorators. @@ -77,14 +87,14 @@ - Feature freeze for `v1.0`. -0.7.1 2014-11-22 ----------------- +v0.7.1 (2014-11-22) +------------------- - Fix `MANIFEST.in`. -0.7.0 2014-11-12 ----------------- +v0.7.0 (2014-11-12) +------------------- - Deprecate ``TTLCache.ExpiredError``. @@ -96,8 +106,8 @@ function decorators. -0.6.0 2014-10-13 ----------------- +v0.6.0 (2014-10-13) +------------------- - Raise ``TTLCache.ExpiredError`` for expired ``TTLCache`` items. @@ -106,16 +116,16 @@ - Allow ``@cachedmethod.cache()`` to return None -0.5.1 2014-09-25 ----------------- +v0.5.1 (2014-09-25) +------------------- - No formatting of ``KeyError`` arguments. - Update ``README.rst``. -0.5.0 2014-09-23 ----------------- +v0.5.0 (2014-09-23) +------------------- - Do not delete expired items in TTLCache.__getitem__(). @@ -124,8 +134,8 @@ - Fix public ``getsizeof()`` usage. -0.4.0 2014-06-16 ----------------- +v0.4.0 (2014-06-16) +------------------- - Add ``TTLCache``. @@ -134,16 +144,16 @@ - Remove ``@cachedmethod`` `lock` parameter. -0.3.1 2014-05-07 ----------------- +v0.3.1 (2014-05-07) +------------------- - Add proper locking for ``cache_clear()`` and ``cache_info()``. - Report `size` in ``cache_info()``. -0.3.0 2014-05-06 ----------------- +v0.3.0 (2014-05-06) +------------------- - Remove ``@cache`` decorator. @@ -152,15 +162,15 @@ - Add ``@cachedmethod`` decorator. -0.2.0 2014-04-02 ----------------- +v0.2.0 (2014-04-02) +------------------- - Add ``@cache`` decorator. - Update documentation. -0.1.0 2014-03-27 ----------------- +v0.1.0 (2014-03-27) +------------------- - Initial release. 
diff --git a/cachetools/__init__.py b/cachetools/__init__.py index c24a13c..650c52e 100644 --- a/cachetools/__init__.py +++ b/cachetools/__init__.py @@ -18,7 +18,7 @@ __all__ = ( 'lfu_cache', 'lru_cache', 'rr_cache', 'ttl_cache', ) -__version__ = '1.1.0' +__version__ = '1.1.1' _default = [] # evaluates to False -- cgit v1.2.3 From c8c8db1e9cec443f1a15fbac7858b5d8f84506ba Mon Sep 17 00:00:00 2001 From: Thomas Kemmer Date: Tue, 15 Sep 2015 20:06:57 +0200 Subject: Fix pickling of large LRUCache and TTLCache instances. --- CHANGES.rst | 6 ++++++ cachetools/__init__.py | 2 +- cachetools/lru.py | 27 +++++++++++++++++++++++++++ cachetools/ttl.py | 28 ++++++++++++++++++++++++++++ tests/__init__.py | 32 +++++++++++++++++++++++++------- 5 files changed, 87 insertions(+), 8 deletions(-) diff --git a/CHANGES.rst b/CHANGES.rst index a72e429..34e548f 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -1,3 +1,9 @@ +v1.1.2 (2015-09-15) +------------------- + +- Fix pickling of large ``LRUCache`` and ``TTLCache`` instances. 
+ + v1.1.1 (2015-09-07) ------------------- diff --git a/cachetools/__init__.py b/cachetools/__init__.py index 650c52e..65f2c5b 100644 --- a/cachetools/__init__.py +++ b/cachetools/__init__.py @@ -18,7 +18,7 @@ __all__ = ( 'lfu_cache', 'lru_cache', 'rr_cache', 'ttl_cache', ) -__version__ = '1.1.1' +__version__ = '1.1.2' _default = [] # evaluates to False diff --git a/cachetools/lru.py b/cachetools/lru.py index 0877707..5cf52b1 100644 --- a/cachetools/lru.py +++ b/cachetools/lru.py @@ -5,6 +5,15 @@ class _Link(object): __slots__ = 'key', 'value', 'prev', 'next' + def __getstate__(self): + if hasattr(self, 'key'): + return (self.key, self.value) + else: + return None + + def __setstate__(self, state): + self.key, self.value = state + def unlink(self): next = self.next prev = self.prev @@ -68,6 +77,24 @@ class LRUCache(Cache): cache_delitem(self, key) link.unlink() + def __getstate__(self): + state = self.__dict__.copy() + root = self.__root + links = state['__links'] = [root] + link = root.next + while link is not root: + links.append(link) + link = link.next + return state + + def __setstate__(self, state): + links = state.pop('__links') + count = len(links) + for index, link in enumerate(links): + link.prev = links[index - 1] + link.next = links[(index + 1) % count] + self.__dict__.update(state) + def getsizeof(self, value): """Return the size of a cache element's value.""" if isinstance(value, _Link): diff --git a/cachetools/ttl.py b/cachetools/ttl.py index 082c5ec..e65a3a3 100644 --- a/cachetools/ttl.py +++ b/cachetools/ttl.py @@ -12,6 +12,15 @@ class _Link(object): 'lru_prev', 'lru_next' ) + def __getstate__(self): + if hasattr(self, 'key'): + return (self.key, self.value, self.expire, self.size) + else: + return None + + def __setstate__(self, state): + self.key, self.value, self.expire, self.size = state + def unlink(self): ttl_next = self.ttl_next ttl_prev = self.ttl_prev @@ -161,6 +170,25 @@ class TTLCache(Cache): head = head.ttl_next return cache_len(self) 
- expired + def __getstate__(self): + state = self.__dict__.copy() + root = self.__root + links = state['__links'] = [(root, root)] + lru, ttl = root.lru_next, root.ttl_next + while lru is not root: + links.append((lru, ttl)) + lru = lru.lru_next + ttl = ttl.ttl_next + return state + + def __setstate__(self, state): + links = state.pop('__links') + count = len(links) + for index, (lru, ttl) in enumerate(links): + lru.lru_prev, ttl.ttl_prev = links[index - 1] + lru.lru_next, ttl.ttl_next = links[(index + 1) % count] + self.__dict__.update(state) + @property def currsize(self): root = self.__root diff --git a/tests/__init__.py b/tests/__init__.py index be66f4b..e1c5098 100644 --- a/tests/__init__.py +++ b/tests/__init__.py @@ -204,14 +204,15 @@ class CacheTestMixin(object): def test_cache_pickle(self): import pickle - import sys - cache = self.cache(maxsize=2) - cache.update({1: 1, 2: 2}) - if sys.version_info < (3, 0): - cache = pickle.loads(pickle.dumps(cache, -1)) - else: - cache = pickle.loads(pickle.dumps(cache)) + source = self.cache(maxsize=2) + source.update({1: 1, 2: 2}) + string = pickle.dumps(source) + + cache = pickle.loads(string) + self.assertEqual(string, pickle.dumps(cache)) + self.assertEqual(source, cache) # iteration may change stringrep + self.assertEqual(2, len(cache)) self.assertEqual(1, cache[1]) self.assertEqual(2, cache[2]) @@ -225,3 +226,20 @@ class CacheTestMixin(object): self.assertEqual(2, len(cache)) self.assertEqual(4, cache[4]) self.assertTrue(1 in cache or 2 in cache or 3 in cache) + + self.assertEqual(cache, pickle.loads(pickle.dumps(cache))) + + def test_cache_pickle_maxsize(self): + import pickle + import sys + + # test empty cache, single element, large cache (recursion limit) + for n in [0, 1, sys.getrecursionlimit() * 2]: + source = self.cache(maxsize=n) + source.update((i, i) for i in range(n)) + string = pickle.dumps(source) + + cache = pickle.loads(string) + self.assertEqual(n, len(cache)) + self.assertEqual(string, 
pickle.dumps(cache)) + self.assertEqual(source, cache) -- cgit v1.2.3 From f4554de8c03a2a7d19593967aebbae70691965dd Mon Sep 17 00:00:00 2001 From: Thomas Kemmer Date: Tue, 15 Sep 2015 20:36:59 +0200 Subject: Fix pickle tests. --- tests/__init__.py | 11 +++-------- 1 file changed, 3 insertions(+), 8 deletions(-) diff --git a/tests/__init__.py b/tests/__init__.py index e1c5098..80f6a91 100644 --- a/tests/__init__.py +++ b/tests/__init__.py @@ -207,11 +207,9 @@ class CacheTestMixin(object): source = self.cache(maxsize=2) source.update({1: 1, 2: 2}) - string = pickle.dumps(source) - cache = pickle.loads(string) - self.assertEqual(string, pickle.dumps(cache)) - self.assertEqual(source, cache) # iteration may change stringrep + cache = pickle.loads(pickle.dumps(source)) + self.assertEqual(source, cache) self.assertEqual(2, len(cache)) self.assertEqual(1, cache[1]) @@ -237,9 +235,6 @@ class CacheTestMixin(object): for n in [0, 1, sys.getrecursionlimit() * 2]: source = self.cache(maxsize=n) source.update((i, i) for i in range(n)) - string = pickle.dumps(source) - - cache = pickle.loads(string) + cache = pickle.loads(pickle.dumps(source)) self.assertEqual(n, len(cache)) - self.assertEqual(string, pickle.dumps(cache)) self.assertEqual(source, cache) -- cgit v1.2.3 From 4f4b452255f65b0ec2aaca066f13ac1a4b91ea09 Mon Sep 17 00:00:00 2001 From: Thomas Kemmer Date: Tue, 15 Sep 2015 20:43:42 +0200 Subject: Prepare v1.1.3. --- CHANGES.rst | 6 ++++++ cachetools/__init__.py | 2 +- 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/CHANGES.rst b/CHANGES.rst index 34e548f..d83e8fa 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -1,3 +1,9 @@ +v1.1.3 (2015-09-15) +------------------- + +- Fix pickle tests. 
+ + v1.1.2 (2015-09-15) ------------------- diff --git a/cachetools/__init__.py b/cachetools/__init__.py index 65f2c5b..d90a28d 100644 --- a/cachetools/__init__.py +++ b/cachetools/__init__.py @@ -18,7 +18,7 @@ __all__ = ( 'lfu_cache', 'lru_cache', 'rr_cache', 'ttl_cache', ) -__version__ = '1.1.2' +__version__ = '1.1.3' _default = [] # evaluates to False -- cgit v1.2.3 From 9b35c76cd5ab488953c7d68f5eacbd05f24d0a5e Mon Sep 17 00:00:00 2001 From: Thomas Kemmer Date: Thu, 17 Sep 2015 19:01:18 +0200 Subject: Fix #51: Refactor cache implementations. --- cachetools/__init__.py | 2 +- cachetools/base.py | 109 ------------------------------------------------- cachetools/cache.py | 109 +++++++++++++++++++++++++++++++++++++++++++++++++ cachetools/lfu.py | 2 +- cachetools/lru.py | 63 +++++++++++----------------- cachetools/rr.py | 2 +- cachetools/ttl.py | 62 +++++++++++----------------- 7 files changed, 159 insertions(+), 190 deletions(-) delete mode 100644 cachetools/base.py create mode 100644 cachetools/cache.py diff --git a/cachetools/__init__.py b/cachetools/__init__.py index d90a28d..95c71fd 100644 --- a/cachetools/__init__.py +++ b/cachetools/__init__.py @@ -3,7 +3,7 @@ import functools import warnings -from .base import Cache +from .cache import Cache from .func import lfu_cache, lru_cache, rr_cache, ttl_cache from .keys import hashkey, typedkey from .lfu import LFUCache diff --git a/cachetools/base.py b/cachetools/base.py deleted file mode 100644 index f0f8d5c..0000000 --- a/cachetools/base.py +++ /dev/null @@ -1,109 +0,0 @@ -import collections - - -class Cache(collections.MutableMapping): - """Mutable mapping to serve as a simple cache or cache base class.""" - - def __init__(self, maxsize, missing=None, getsizeof=None): - self.__data = dict() - self.__currsize = 0 - self.__maxsize = maxsize - if missing: - self.__missing = missing - if getsizeof: - self.__getsizeof = getsizeof - - def __repr__(self): - return '%s(%r, maxsize=%d, currsize=%d)' % ( - 
self.__class__.__name__, - list(self.items()), - self.__maxsize, - self.__currsize, - ) - - def __getitem__(self, key): - try: - return self.__data[key][0] - except KeyError: - return self.__missing__(key) - - def __setitem__(self, key, value): - data = self.__data - maxsize = self.__maxsize - size = self.getsizeof(value) - if size > maxsize: - raise ValueError('value too large') - if key not in data or data[key][1] < size: - while self.__currsize + size > maxsize: - self.popitem() - if key in data: - diffsize = size - data[key][1] - else: - diffsize = size - data[key] = (value, size) - self.__currsize += diffsize - - def __delitem__(self, key): - _, size = self.__data.pop(key) - self.__currsize -= size - - def __contains__(self, key): - return key in self.__data - - def __missing__(self, key): - self.__setitem__(key, self.__missing(key)) - # return value as stored in data - return self.__data[key][0] - - def __iter__(self): - return iter(self.__data) - - def __len__(self): - return len(self.__data) - - @staticmethod - def __getsizeof(value): - return 1 - - @staticmethod - def __missing(key): - raise KeyError(key) - - @property - def maxsize(self): - """The maximum size of the cache.""" - return self.__maxsize - - @property - def currsize(self): - """The current size of the cache.""" - return self.__currsize - - def getsizeof(self, value): - """Return the size of a cache element's value.""" - return self.__getsizeof(value) - - # collections.MutableMapping mixin methods do not handle __missing__ - - def get(self, key, default=None): - if key in self: - return self[key] - else: - return default - - __marker = object() - - def pop(self, key, default=__marker): - if key in self: - value = self[key] - del self[key] - return value - elif default is self.__marker: - raise KeyError(key) - else: - return default - - def setdefault(self, key, default=None): - if key not in self: - self[key] = default - return self[key] diff --git a/cachetools/cache.py b/cachetools/cache.py 
new file mode 100644 index 0000000..f0f8d5c --- /dev/null +++ b/cachetools/cache.py @@ -0,0 +1,109 @@ +import collections + + +class Cache(collections.MutableMapping): + """Mutable mapping to serve as a simple cache or cache base class.""" + + def __init__(self, maxsize, missing=None, getsizeof=None): + self.__data = dict() + self.__currsize = 0 + self.__maxsize = maxsize + if missing: + self.__missing = missing + if getsizeof: + self.__getsizeof = getsizeof + + def __repr__(self): + return '%s(%r, maxsize=%d, currsize=%d)' % ( + self.__class__.__name__, + list(self.items()), + self.__maxsize, + self.__currsize, + ) + + def __getitem__(self, key): + try: + return self.__data[key][0] + except KeyError: + return self.__missing__(key) + + def __setitem__(self, key, value): + data = self.__data + maxsize = self.__maxsize + size = self.getsizeof(value) + if size > maxsize: + raise ValueError('value too large') + if key not in data or data[key][1] < size: + while self.__currsize + size > maxsize: + self.popitem() + if key in data: + diffsize = size - data[key][1] + else: + diffsize = size + data[key] = (value, size) + self.__currsize += diffsize + + def __delitem__(self, key): + _, size = self.__data.pop(key) + self.__currsize -= size + + def __contains__(self, key): + return key in self.__data + + def __missing__(self, key): + self.__setitem__(key, self.__missing(key)) + # return value as stored in data + return self.__data[key][0] + + def __iter__(self): + return iter(self.__data) + + def __len__(self): + return len(self.__data) + + @staticmethod + def __getsizeof(value): + return 1 + + @staticmethod + def __missing(key): + raise KeyError(key) + + @property + def maxsize(self): + """The maximum size of the cache.""" + return self.__maxsize + + @property + def currsize(self): + """The current size of the cache.""" + return self.__currsize + + def getsizeof(self, value): + """Return the size of a cache element's value.""" + return self.__getsizeof(value) + + # 
collections.MutableMapping mixin methods do not handle __missing__ + + def get(self, key, default=None): + if key in self: + return self[key] + else: + return default + + __marker = object() + + def pop(self, key, default=__marker): + if key in self: + value = self[key] + del self[key] + return value + elif default is self.__marker: + raise KeyError(key) + else: + return default + + def setdefault(self, key, default=None): + if key not in self: + self[key] = default + return self[key] diff --git a/cachetools/lfu.py b/cachetools/lfu.py index 26a8b1e..d163cba 100644 --- a/cachetools/lfu.py +++ b/cachetools/lfu.py @@ -1,7 +1,7 @@ import collections import operator -from .base import Cache +from .cache import Cache class LFUCache(Cache): diff --git a/cachetools/lru.py b/cachetools/lru.py index 5cf52b1..1daf6d4 100644 --- a/cachetools/lru.py +++ b/cachetools/lru.py @@ -1,18 +1,18 @@ -from .base import Cache +from .cache import Cache class _Link(object): - __slots__ = 'key', 'value', 'prev', 'next' + __slots__ = 'key', 'prev', 'next' def __getstate__(self): if hasattr(self, 'key'): - return (self.key, self.value) + return (self.key,) else: return None def __setstate__(self, state): - self.key, self.value = state + self.key, = state def unlink(self): next = self.next @@ -28,18 +28,20 @@ class LRUCache(Cache): Cache.__init__(self, maxsize, missing, getsizeof) root = self.__root = _Link() root.prev = root.next = root + self.__links = {} def __repr__(self, cache_getitem=Cache.__getitem__): # prevent item reordering return '%s(%r, maxsize=%d, currsize=%d)' % ( self.__class__.__name__, - [(key, cache_getitem(self, key).value) for key in self], + [(key, cache_getitem(self, key)) for key in self], self.maxsize, self.currsize, ) def __getitem__(self, key, cache_getitem=Cache.__getitem__): - link = cache_getitem(self, key) + value = cache_getitem(self, key) + link = self.__links[key] next = link.next prev = link.prev prev.next = next @@ -47,35 +49,25 @@ class LRUCache(Cache): 
link.next = root = self.__root link.prev = tail = root.prev tail.next = root.prev = link - return link.value - - def __setitem__(self, key, value, - cache_contains=Cache.__contains__, - cache_getitem=Cache.__getitem__, - cache_setitem=Cache.__setitem__): - if cache_contains(self, key): - oldlink = cache_getitem(self, key) + return value + + def __setitem__(self, key, value, cache_setitem=Cache.__setitem__): + cache_setitem(self, key, value) + try: + link = self.__links[key] + except KeyError: + link = self.__links[key] = _Link() # TODO: exception safety? else: - oldlink = None - link = _Link() - link.key = key - link.value = value - cache_setitem(self, key, link) - if oldlink: - oldlink.unlink() + link.unlink() + link.key = key # always update link.next = root = self.__root link.prev = tail = root.prev tail.next = root.prev = link - def __delitem__(self, key, - cache_contains=Cache.__contains__, - cache_getitem=Cache.__getitem__, - cache_delitem=Cache.__delitem__): - if not cache_contains(self, key): - raise KeyError(key) - link = cache_getitem(self, key) + def __delitem__(self, key, cache_delitem=Cache.__delitem__): cache_delitem(self, key) - link.unlink() + self.__links[key].unlink() + del self.__links[key] def __getstate__(self): state = self.__dict__.copy() @@ -95,20 +87,11 @@ class LRUCache(Cache): link.next = links[(index + 1) % count] self.__dict__.update(state) - def getsizeof(self, value): - """Return the size of a cache element's value.""" - if isinstance(value, _Link): - return Cache.getsizeof(self, value.value) - else: - return Cache.getsizeof(self, value) - def popitem(self): """Remove and return the `(key, value)` pair least recently used.""" root = self.__root link = root.next if link is root: - raise KeyError('cache is empty') + raise KeyError('cache is empty: %r' % self.__links) key = link.key - Cache.__delitem__(self, key) - link.unlink() - return (key, link.value) + return (key, self.pop(key)) diff --git a/cachetools/rr.py b/cachetools/rr.py 
index 4b3bcc6..143223b 100644 --- a/cachetools/rr.py +++ b/cachetools/rr.py @@ -1,6 +1,6 @@ import random -from .base import Cache +from .cache import Cache class RRCache(Cache): diff --git a/cachetools/ttl.py b/cachetools/ttl.py index e65a3a3..1fe1141 100644 --- a/cachetools/ttl.py +++ b/cachetools/ttl.py @@ -1,25 +1,25 @@ import functools import time -from .base import Cache +from .cache import Cache class _Link(object): __slots__ = ( - 'key', 'value', 'expire', 'size', + 'key', 'expire', 'size', 'ttl_prev', 'ttl_next', 'lru_prev', 'lru_next' ) def __getstate__(self): if hasattr(self, 'key'): - return (self.key, self.value, self.expire, self.size) + return (self.key, self.expire, self.size) else: return None def __setstate__(self, state): - self.key, self.value, self.expire, self.size = state + self.key, self.expire, self.size = state def unlink(self): ttl_next = self.ttl_next @@ -73,6 +73,7 @@ class TTLCache(Cache): root = self.__root = _Link() root.ttl_prev = root.ttl_next = root root.lru_prev = root.lru_next = root + self.__links = {} self.__timer = _NestedTimer(timer) self.__ttl = ttl @@ -80,7 +81,7 @@ class TTLCache(Cache): # prevent item reordering/expiration return '%s(%r, maxsize=%d, currsize=%d)' % ( self.__class__.__name__, - [(key, cache_getitem(self, key).value) for key in self], + [(key, cache_getitem(self, key)) for key in self], self.maxsize, self.currsize, ) @@ -89,9 +90,10 @@ class TTLCache(Cache): cache_getitem=Cache.__getitem__, cache_missing=Cache.__missing__): with self.__timer as time: - link = cache_getitem(self, key) + value = cache_getitem(self, key) + link = self.__links[key] if link.expire < time: - return cache_missing(self, key).value + return cache_missing(self, key) next = link.lru_next prev = link.lru_prev prev.lru_next = next @@ -99,27 +101,23 @@ class TTLCache(Cache): link.lru_next = root = self.__root link.lru_prev = tail = root.lru_prev tail.lru_next = root.lru_prev = link - return link.value + return value def 
__setitem__(self, key, value, - cache_contains=Cache.__contains__, - cache_getitem=Cache.__getitem__, cache_setitem=Cache.__setitem__, cache_getsizeof=Cache.getsizeof): with self.__timer as time: self.expire(time) - if cache_contains(self, key): - oldlink = cache_getitem(self, key) + cache_setitem(self, key, value) + try: + link = self.__links[key] + except KeyError: + link = self.__links[key] = _Link() # TODO: exception safety? else: - oldlink = None - link = _Link() + link.unlink() link.key = key - link.value = value link.expire = time + self.__ttl link.size = cache_getsizeof(self, value) - cache_setitem(self, key, link) - if oldlink: - oldlink.unlink() link.ttl_next = root = self.__root link.ttl_prev = tail = root.ttl_prev tail.ttl_next = root.ttl_prev = link @@ -133,19 +131,15 @@ class TTLCache(Cache): cache_delitem=Cache.__delitem__): with self.__timer as time: self.expire(time) - if not cache_contains(self, key): - raise KeyError(key) - link = cache_getitem(self, key) cache_delitem(self, key) - link.unlink() + self.__links[key].unlink() + del self.__links[key] - def __contains__(self, key, - cache_contains=Cache.__contains__, - cache_getitem=Cache.__getitem__): + def __contains__(self, key): with self.__timer as time: - if not cache_contains(self, key): + if key not in self.__links: return False - elif cache_getitem(self, key).expire < time: + elif self.__links[key].expire < time: return False else: return True @@ -219,17 +213,11 @@ class TTLCache(Cache): cache_delitem = Cache.__delitem__ while head is not root and head.expire < time: cache_delitem(self, head.key) + del self.__links[head.key] next = head.ttl_next head.unlink() head = next - def getsizeof(self, value): - """Return the size of a cache element's value.""" - if isinstance(value, _Link): - return value.size - else: - return Cache.getsizeof(self, value) - def popitem(self): """Remove and return the `(key, value)` pair least recently used that has not already expired. 
@@ -240,11 +228,9 @@ class TTLCache(Cache): root = self.__root link = root.lru_next if link is root: - raise KeyError('cache is empty') + raise KeyError('cache is empty: %r' % self.__links) key = link.key - Cache.__delitem__(self, key) - link.unlink() - return (key, link.value) + return (key, self.pop(key)) # mixin methods -- cgit v1.2.3 From 429c5f13d94867277c9d2c0b0a92961329d4e951 Mon Sep 17 00:00:00 2001 From: Thomas Kemmer Date: Sat, 24 Oct 2015 22:10:25 +0200 Subject: Fix #53: Use fib() and fac() as examples in documentation. --- docs/index.rst | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/docs/index.rst b/docs/index.rst index 83a1559..2819bc3 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -144,7 +144,7 @@ of one argument used to retrieve the size of an item's value. Decorators ------------------------------------------------------------------------ -The :mod:`cachetools` module provides decorators for easily caching +The :mod:`cachetools` module provides decorators for memoizing function and method calls. This can save time when a function is often called with the same arguments:: @@ -204,8 +204,8 @@ often called with the same arguments:: lock = RLock() @cached(cache, lock=lock) - def foo(n): - return n + 1 # expensive operation here... + def fib(n): + return n if n < 2 else fib(n - 1) + fib(n - 2) # make sure access to cache is synchronized with lock: @@ -221,16 +221,16 @@ often called with the same arguments:: cache = LRUCache(maxsize=100) - @cached(cache, key=partial(hashkey, 'foo')) - def foo(n): - return n + n + @cached(cache, key=partial(hashkey, 'fib')) + def fib(n): + return n if n < 2 else fib(n - 1) + fib(n - 2) - @cached(cache, key=partial(hashkey, 'bar')) - def bar(n): - return n * n + @cached(cache, key=partial(hashkey, 'fac')) + def fac(n): + return 1 if n == 0 else n * fac(n - 1) - foo(42) - bar(42) + print(fib(42)) + print(fac(42)) print(cache) .. 
versionadded:: 1.1 -- cgit v1.2.3 From 7bdca3bf9a458bf26c1b287c91052a8b00844712 Mon Sep 17 00:00:00 2001 From: Thomas Kemmer Date: Sat, 24 Oct 2015 22:27:36 +0200 Subject: Fix #55: Document pending removal of deprecated features. --- docs/index.rst | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/docs/index.rst b/docs/index.rst index 2819bc3..9c209b7 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -21,6 +21,14 @@ Multiple cache classes based on different caching algorithms are implemented, and decorators for easily memoizing function and method calls are provided, too. +.. note:: + + Several features are now marked as deprecated and will be removed + in the next major release, :mod:`cachetools` version 2.0. If you + happen to rely on any of these features, it is highly recommended + to specify your module dependencies accordingly, for example + ``cachetools ~= 1.1`` when using :mod:`setuptools`. + .. versionchanged:: 1.1 Moved :func:`functools.lru_cache` compatible decorators to the -- cgit v1.2.3 From 62b827326c0ea615a202c31d4071fef1a556745b Mon Sep 17 00:00:00 2001 From: Thomas Kemmer Date: Sat, 24 Oct 2015 22:28:08 +0200 Subject: Move to container-based infrastructure, install coverage<4 for use with Python 3.2. --- .travis.yml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/.travis.yml b/.travis.yml index b8561a7..ed67c83 100644 --- a/.travis.yml +++ b/.travis.yml @@ -1,3 +1,4 @@ +sudo: false language: python python: - 2.7 @@ -6,7 +7,7 @@ python: - 3.4 - 3.5-dev install: -- pip install . coverage coveralls flake8 flake8-import-order nose +- pip install . "coverage<4" coveralls flake8 flake8-import-order nose script: - flake8 - nosetests -- cgit v1.2.3 From 41d521bd38cdd6c6d0352ebf80eeae747e42a509 Mon Sep 17 00:00:00 2001 From: Thomas Kemmer Date: Sat, 24 Oct 2015 22:38:47 +0200 Subject: Prepare v1.1.4. 
--- CHANGES.rst | 11 +++++++++++ cachetools/__init__.py | 2 +- 2 files changed, 12 insertions(+), 1 deletion(-) diff --git a/CHANGES.rst b/CHANGES.rst index d83e8fa..6dda61c 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -1,3 +1,14 @@ +v1.1.4 (2015-10-24) +------------------- + +- Refactor ``LRUCache`` and ``TTLCache`` implementations. Note that + this will break pickle compatibility with previous versions. + +- Document pending removal of deprecated features. + +- Minor documentation improvements. + + v1.1.3 (2015-09-15) ------------------- diff --git a/cachetools/__init__.py b/cachetools/__init__.py index 95c71fd..afa8853 100644 --- a/cachetools/__init__.py +++ b/cachetools/__init__.py @@ -18,7 +18,7 @@ __all__ = ( 'lfu_cache', 'lru_cache', 'rr_cache', 'ttl_cache', ) -__version__ = '1.1.3' +__version__ = '1.1.4' _default = [] # evaluates to False -- cgit v1.2.3 From e332a99beb0f8baaf62a5ea883c0da327af3233a Mon Sep 17 00:00:00 2001 From: Thomas Kemmer Date: Sun, 25 Oct 2015 14:29:25 +0100 Subject: Fix #56: Refactor Cache base class. 
--- cachetools/cache.py | 41 ++++++++++++++++++++++----------- cachetools/lru.py | 31 ++++++++++++------------- cachetools/ttl.py | 66 ++++++++++++++++++++++++++++++----------------------- 3 files changed, 80 insertions(+), 58 deletions(-) diff --git a/cachetools/cache.py b/cachetools/cache.py index f0f8d5c..fbebbbe 100644 --- a/cachetools/cache.py +++ b/cachetools/cache.py @@ -1,17 +1,31 @@ import collections +class _DefaultSize(object): + def __getitem__(self, _): + return 1 + + def __setitem__(self, _, value): + assert value == 1 + + def pop(self, _): + return 1 + + class Cache(collections.MutableMapping): """Mutable mapping to serve as a simple cache or cache base class.""" + __size = _DefaultSize() + def __init__(self, maxsize, missing=None, getsizeof=None): - self.__data = dict() - self.__currsize = 0 - self.__maxsize = maxsize if missing: self.__missing = missing if getsizeof: self.__getsizeof = getsizeof + self.__size = dict() + self.__data = dict() + self.__currsize = 0 + self.__maxsize = maxsize def __repr__(self): return '%s(%r, maxsize=%d, currsize=%d)' % ( @@ -23,37 +37,38 @@ class Cache(collections.MutableMapping): def __getitem__(self, key): try: - return self.__data[key][0] + return self.__data[key] except KeyError: return self.__missing__(key) def __setitem__(self, key, value): - data = self.__data maxsize = self.__maxsize size = self.getsizeof(value) if size > maxsize: raise ValueError('value too large') - if key not in data or data[key][1] < size: + if key not in self.__data or self.__size[key] < size: while self.__currsize + size > maxsize: self.popitem() - if key in data: - diffsize = size - data[key][1] + if key in self.__data: + diffsize = size - self.__size[key] else: diffsize = size - data[key] = (value, size) + self.__data[key] = value + self.__size[key] = size self.__currsize += diffsize def __delitem__(self, key): - _, size = self.__data.pop(key) + size = self.__size.pop(key) + del self.__data[key] self.__currsize -= size def 
__contains__(self, key): return key in self.__data def __missing__(self, key): - self.__setitem__(key, self.__missing(key)) - # return value as stored in data - return self.__data[key][0] + value = self.__missing(key) + self.__setitem__(key, value) + return value def __iter__(self): return iter(self.__data) diff --git a/cachetools/lru.py b/cachetools/lru.py index 1daf6d4..f18f0b2 100644 --- a/cachetools/lru.py +++ b/cachetools/lru.py @@ -14,6 +14,11 @@ class _Link(object): def __setstate__(self, state): self.key, = state + def insert(self, next): + self.next = next + self.prev = prev = next.prev + prev.next = next.prev = self + def unlink(self): next = self.next prev = self.prev @@ -26,7 +31,7 @@ class LRUCache(Cache): def __init__(self, maxsize, missing=None, getsizeof=None): Cache.__init__(self, maxsize, missing, getsizeof) - root = self.__root = _Link() + self.__root = root = _Link() root.prev = root.next = root self.__links = {} @@ -42,13 +47,8 @@ class LRUCache(Cache): def __getitem__(self, key, cache_getitem=Cache.__getitem__): value = cache_getitem(self, key) link = self.__links[key] - next = link.next - prev = link.prev - prev.next = next - next.prev = prev - link.next = root = self.__root - link.prev = tail = root.prev - tail.next = root.prev = link + link.unlink() + link.insert(self.__root) return value def __setitem__(self, key, value, cache_setitem=Cache.__setitem__): @@ -56,18 +56,17 @@ class LRUCache(Cache): try: link = self.__links[key] except KeyError: - link = self.__links[key] = _Link() # TODO: exception safety? 
+ link = self.__links[key] = _Link() else: link.unlink() - link.key = key # always update - link.next = root = self.__root - link.prev = tail = root.prev - tail.next = root.prev = link + link.key = key + link.insert(self.__root) def __delitem__(self, key, cache_delitem=Cache.__delitem__): cache_delitem(self, key) - self.__links[key].unlink() - del self.__links[key] + links = self.__links + links[key].unlink() + del links[key] def __getstate__(self): state = self.__dict__.copy() @@ -92,6 +91,6 @@ class LRUCache(Cache): root = self.__root link = root.next if link is root: - raise KeyError('cache is empty: %r' % self.__links) + raise KeyError('%s is empty' % self.__class__.__name__) key = link.key return (key, self.pop(key)) diff --git a/cachetools/ttl.py b/cachetools/ttl.py index 1fe1141..57c8f9c 100644 --- a/cachetools/ttl.py +++ b/cachetools/ttl.py @@ -21,17 +21,36 @@ class _Link(object): def __setstate__(self, state): self.key, self.expire, self.size = state - def unlink(self): - ttl_next = self.ttl_next - ttl_prev = self.ttl_prev - ttl_prev.ttl_next = ttl_next - ttl_next.ttl_prev = ttl_prev + def insert_lru(self, next): + self.lru_next = next + self.lru_prev = prev = next.lru_prev + prev.lru_next = next.lru_prev = self + + def insert_ttl(self, next): + self.ttl_next = next + self.ttl_prev = prev = next.ttl_prev + prev.ttl_next = next.ttl_prev = self + def insert(self, next): + self.insert_lru(next) + self.insert_ttl(next) + + def unlink_lru(self): lru_next = self.lru_next lru_prev = self.lru_prev lru_prev.lru_next = lru_next lru_next.lru_prev = lru_prev + def unlink_ttl(self): + ttl_next = self.ttl_next + ttl_prev = self.ttl_prev + ttl_prev.ttl_next = ttl_next + ttl_next.ttl_prev = ttl_prev + + def unlink(self): + self.unlink_lru() + self.unlink_ttl() + class _NestedTimer(object): @@ -70,7 +89,7 @@ class TTLCache(Cache): def __init__(self, maxsize, ttl, timer=time.time, missing=None, getsizeof=None): Cache.__init__(self, maxsize, missing, getsizeof) - root = 
self.__root = _Link() + self.__root = root = _Link() root.ttl_prev = root.ttl_next = root root.lru_prev = root.lru_next = root self.__links = {} @@ -94,13 +113,8 @@ class TTLCache(Cache): link = self.__links[key] if link.expire < time: return cache_missing(self, key) - next = link.lru_next - prev = link.lru_prev - prev.lru_next = next - next.lru_prev = prev - link.lru_next = root = self.__root - link.lru_prev = tail = root.lru_prev - tail.lru_next = root.lru_prev = link + link.unlink_lru() + link.insert_lru(self.__root) return value def __setitem__(self, key, value, @@ -112,28 +126,21 @@ class TTLCache(Cache): try: link = self.__links[key] except KeyError: - link = self.__links[key] = _Link() # TODO: exception safety? + link = self.__links[key] = _Link() else: link.unlink() link.key = key link.expire = time + self.__ttl link.size = cache_getsizeof(self, value) - link.ttl_next = root = self.__root - link.ttl_prev = tail = root.ttl_prev - tail.ttl_next = root.ttl_prev = link - link.lru_next = root - link.lru_prev = tail = root.lru_prev - tail.lru_next = root.lru_prev = link - - def __delitem__(self, key, - cache_contains=Cache.__contains__, - cache_getitem=Cache.__getitem__, - cache_delitem=Cache.__delitem__): + link.insert(self.__root) + + def __delitem__(self, key, cache_delitem=Cache.__delitem__): with self.__timer as time: self.expire(time) cache_delitem(self, key) - self.__links[key].unlink() - del self.__links[key] + links = self.__links + links[key].unlink() + del links[key] def __contains__(self, key): with self.__timer as time: @@ -210,10 +217,11 @@ class TTLCache(Cache): time = self.__timer() root = self.__root head = root.ttl_next + links = self.__links cache_delitem = Cache.__delitem__ while head is not root and head.expire < time: cache_delitem(self, head.key) - del self.__links[head.key] + del links[head.key] next = head.ttl_next head.unlink() head = next @@ -228,7 +236,7 @@ class TTLCache(Cache): root = self.__root link = root.lru_next if link is root: 
- raise KeyError('cache is empty: %r' % self.__links) + raise KeyError('%s is empty' % self.__class__.__name__) key = link.key return (key, self.pop(key)) -- cgit v1.2.3 From 42853733d0caf68ef5bf5933a377572b05437e2c Mon Sep 17 00:00:00 2001 From: Thomas Kemmer Date: Sun, 25 Oct 2015 14:58:38 +0100 Subject: Prepare v1.1.5. --- CHANGES.rst | 9 +++++++++ cachetools/__init__.py | 2 +- 2 files changed, 10 insertions(+), 1 deletion(-) diff --git a/CHANGES.rst b/CHANGES.rst index 6dda61c..dd8b751 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -1,3 +1,12 @@ +v1.1.5 (2015-10-25) +------------------- + +- Refactor ``Cache`` base class. Note that this will break pickle + compatibility with previous versions. + +- Clean up ``LRUCache`` and ``TTLCache`` implementations. + + v1.1.4 (2015-10-24) ------------------- diff --git a/cachetools/__init__.py b/cachetools/__init__.py index afa8853..66d5735 100644 --- a/cachetools/__init__.py +++ b/cachetools/__init__.py @@ -18,7 +18,7 @@ __all__ = ( 'lfu_cache', 'lru_cache', 'rr_cache', 'ttl_cache', ) -__version__ = '1.1.4' +__version__ = '1.1.5' _default = [] # evaluates to False -- cgit v1.2.3 From a22741985631795b9bdbeb5a36ab2870aa5f3743 Mon Sep 17 00:00:00 2001 From: Thomas Kemmer Date: Sun, 25 Oct 2015 17:07:47 +0100 Subject: Prototype 'missing' abcs. --- cachetools/abc.py | 81 +++++++++++++++++++++++++++++++++++++++++++++++++++++ cachetools/cache.py | 29 ++----------------- 2 files changed, 83 insertions(+), 27 deletions(-) create mode 100644 cachetools/abc.py diff --git a/cachetools/abc.py b/cachetools/abc.py new file mode 100644 index 0000000..54cf840 --- /dev/null +++ b/cachetools/abc.py @@ -0,0 +1,81 @@ +from __future__ import absolute_import + +import collections + +from abc import ABCMeta, abstractmethod + + +class Missing: # TBD: "Missable"? "Missed"? + + __metaclass__ = ABCMeta + + __slots__ = () + + # TBD: abstract? 
+ def __missing__(self, key): + raise KeyError(key) + + @classmethod + def __subclasshook__(cls, C): + if cls is Missing: + if any('__missing__' in B.__dict__ for B in C.__mro__): + return True + return NotImplemented + + +class MissingMapping(collections.Mapping, Missing): + + __slots__ = () + + @abstractmethod + def __getitem__(self, key): + return self.__missing__(key) + + @abstractmethod + def __contains__(self, key): + return False + + def get(self, key, default=None): + if key in self: + return self[key] + else: + return default + + +# TODO: derive fom MissingMapping? +class MissingMutableMapping(collections.MutableMapping, Missing): + + __slots__ = () + + @abstractmethod + def __getitem__(self, key): + return self.__missing__(key) + + @abstractmethod + def __contains__(self, key): + return False + + def get(self, key, default=None): + if key in self: + return self[key] + else: + return default + + __marker = object() + + def pop(self, key, default=__marker): + if key in self: + value = self[key] + del self[key] + return value + elif default is self.__marker: + raise KeyError(key) + else: + return default + + def setdefault(self, key, default=None): + if key in self: + return self[key] + else: + self[key] = default + return default diff --git a/cachetools/cache.py b/cachetools/cache.py index fbebbbe..c0d2b8a 100644 --- a/cachetools/cache.py +++ b/cachetools/cache.py @@ -1,4 +1,4 @@ -import collections +from .abc import MissingMutableMapping class _DefaultSize(object): @@ -12,7 +12,7 @@ class _DefaultSize(object): return 1 -class Cache(collections.MutableMapping): +class Cache(MissingMutableMapping): """Mutable mapping to serve as a simple cache or cache base class.""" __size = _DefaultSize() @@ -97,28 +97,3 @@ class Cache(collections.MutableMapping): def getsizeof(self, value): """Return the size of a cache element's value.""" return self.__getsizeof(value) - - # collections.MutableMapping mixin methods do not handle __missing__ - - def get(self, key, 
default=None): - if key in self: - return self[key] - else: - return default - - __marker = object() - - def pop(self, key, default=__marker): - if key in self: - value = self[key] - del self[key] - return value - elif default is self.__marker: - raise KeyError(key) - else: - return default - - def setdefault(self, key, default=None): - if key not in self: - self[key] = default - return self[key] -- cgit v1.2.3 From ddf9db143a890c77bd1779fe4627dedc0fa2763a Mon Sep 17 00:00:00 2001 From: Thomas Kemmer Date: Sun, 8 Nov 2015 14:18:59 +0100 Subject: Update travis build, Python 3.5 support. --- .travis.yml | 2 +- setup.py | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/.travis.yml b/.travis.yml index ed67c83..dd0423b 100644 --- a/.travis.yml +++ b/.travis.yml @@ -5,7 +5,7 @@ python: - 3.2 - 3.3 - 3.4 -- 3.5-dev +- 3.5 install: - pip install . "coverage<4" coveralls flake8 flake8-import-order nose script: diff --git a/setup.py b/setup.py index 4a25ae6..a683e67 100644 --- a/setup.py +++ b/setup.py @@ -31,6 +31,7 @@ setup( 'Programming Language :: Python :: 3.2', 'Programming Language :: Python :: 3.3', 'Programming Language :: Python :: 3.4', + 'Programming Language :: Python :: 3.5', 'Topic :: Software Development :: Libraries :: Python Modules' ], packages=['cachetools'], -- cgit v1.2.3 From a9c3f89aaf0a225ee3a851d9527df4ee82cfe8db Mon Sep 17 00:00:00 2001 From: Thomas Kemmer Date: Sun, 8 Nov 2015 14:19:08 +0100 Subject: Include docs in package. --- MANIFEST.in | 3 +++ 1 file changed, 3 insertions(+) diff --git a/MANIFEST.in b/MANIFEST.in index 8b3684f..b16d610 100644 --- a/MANIFEST.in +++ b/MANIFEST.in @@ -3,4 +3,7 @@ include LICENSE include MANIFEST.in include README.rst +recursive-include docs * +prune docs/_build + recursive-include tests *.py -- cgit v1.2.3 From fd88358d7978a70b52b8ad88cf5c18103a2cec2a Mon Sep 17 00:00:00 2001 From: Thomas Kemmer Date: Fri, 20 Nov 2015 17:41:51 +0100 Subject: Revert "Prototype 'missing' abcs." 
This reverts commit a22741985631795b9bdbeb5a36ab2870aa5f3743. --- cachetools/abc.py | 81 ----------------------------------------------------- cachetools/cache.py | 29 +++++++++++++++++-- 2 files changed, 27 insertions(+), 83 deletions(-) delete mode 100644 cachetools/abc.py diff --git a/cachetools/abc.py b/cachetools/abc.py deleted file mode 100644 index 54cf840..0000000 --- a/cachetools/abc.py +++ /dev/null @@ -1,81 +0,0 @@ -from __future__ import absolute_import - -import collections - -from abc import ABCMeta, abstractmethod - - -class Missing: # TBD: "Missable"? "Missed"? - - __metaclass__ = ABCMeta - - __slots__ = () - - # TBD: abstract? - def __missing__(self, key): - raise KeyError(key) - - @classmethod - def __subclasshook__(cls, C): - if cls is Missing: - if any('__missing__' in B.__dict__ for B in C.__mro__): - return True - return NotImplemented - - -class MissingMapping(collections.Mapping, Missing): - - __slots__ = () - - @abstractmethod - def __getitem__(self, key): - return self.__missing__(key) - - @abstractmethod - def __contains__(self, key): - return False - - def get(self, key, default=None): - if key in self: - return self[key] - else: - return default - - -# TODO: derive fom MissingMapping? 
-class MissingMutableMapping(collections.MutableMapping, Missing): - - __slots__ = () - - @abstractmethod - def __getitem__(self, key): - return self.__missing__(key) - - @abstractmethod - def __contains__(self, key): - return False - - def get(self, key, default=None): - if key in self: - return self[key] - else: - return default - - __marker = object() - - def pop(self, key, default=__marker): - if key in self: - value = self[key] - del self[key] - return value - elif default is self.__marker: - raise KeyError(key) - else: - return default - - def setdefault(self, key, default=None): - if key in self: - return self[key] - else: - self[key] = default - return default diff --git a/cachetools/cache.py b/cachetools/cache.py index c0d2b8a..fbebbbe 100644 --- a/cachetools/cache.py +++ b/cachetools/cache.py @@ -1,4 +1,4 @@ -from .abc import MissingMutableMapping +import collections class _DefaultSize(object): @@ -12,7 +12,7 @@ class _DefaultSize(object): return 1 -class Cache(MissingMutableMapping): +class Cache(collections.MutableMapping): """Mutable mapping to serve as a simple cache or cache base class.""" __size = _DefaultSize() @@ -97,3 +97,28 @@ class Cache(MissingMutableMapping): def getsizeof(self, value): """Return the size of a cache element's value.""" return self.__getsizeof(value) + + # collections.MutableMapping mixin methods do not handle __missing__ + + def get(self, key, default=None): + if key in self: + return self[key] + else: + return default + + __marker = object() + + def pop(self, key, default=__marker): + if key in self: + value = self[key] + del self[key] + return value + elif default is self.__marker: + raise KeyError(key) + else: + return default + + def setdefault(self, key, default=None): + if key not in self: + self[key] = default + return self[key] -- cgit v1.2.3 From 2024b775d1226c8f2751ba3937b28a4d60dfff3e Mon Sep 17 00:00:00 2001 From: Alejandro Rivera Date: Wed, 16 Mar 2016 02:02:16 -0600 Subject: Fix a couple typos in the 
documentation file. --- docs/index.rst | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/index.rst b/docs/index.rst index 9c209b7..b287c46 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -177,7 +177,7 @@ often called with the same arguments:: mutable mapping type, including plain :class:`dict` and :class:`weakref.WeakValueDictionary`. - `key` specifies a functions that will be called with the same + `key` specifies a function that will be called with the same positional and keyword arguments as the wrapped function itself, and which has to return a suitable cache key. Since caches are mappings, the object returned by `key` must be hashable. The @@ -349,7 +349,7 @@ functions for handling some non-hashable arguments. For example, calling the following function with a dictionary as its `env` argument will raise a :class:`TypeError`, since :class:`dict` is not hashable:: - @cached(LRUCache(maxsize=128) + @cached(LRUCache(maxsize=128)) def foo(x, y, z, env={}): pass -- cgit v1.2.3 From 775c51da45db486627365697faf4082b7982d56c Mon Sep 17 00:00:00 2001 From: Thomas Kemmer Date: Tue, 29 Mar 2016 13:03:40 +0200 Subject: Code/build cleanups. 
--- .gitignore | 42 ++------- .travis.yml | 10 +- MANIFEST.in | 1 + cachetools/abc.py | 48 ++++++++++ cachetools/cache.py | 31 +------ cachetools/lfu.py | 7 +- cachetools/lru.py | 45 +++------ cachetools/rr.py | 15 +-- cachetools/ttl.py | 26 +++--- docs/conf.py | 257 ++++------------------------------------------------ setup.cfg | 10 +- setup.py | 24 ++--- tox.ini | 32 +++++++ 13 files changed, 169 insertions(+), 379 deletions(-) create mode 100644 cachetools/abc.py create mode 100644 tox.ini diff --git a/.gitignore b/.gitignore index ded6067..1a39b30 100644 --- a/.gitignore +++ b/.gitignore @@ -1,36 +1,10 @@ -*.py[cod] - -# C extensions -*.so - -# Packages -*.egg *.egg-info -dist -build -eggs -parts -bin -var -sdist -develop-eggs -.installed.cfg -lib -lib64 -__pycache__ - -# Installer logs -pip-log.txt - -# Unit test / coverage reports +*.pyc +*.swp +.cache/ .coverage -.tox -nosetests.xml - -# Translations -*.mo - -# Mr Developer -.mr.developer.cfg -.project -.pydevproject +.tox/ +MANIFEST +build/ +dist/ +docs/_build/ diff --git a/.travis.yml b/.travis.yml index dd0423b..ac1856c 100644 --- a/.travis.yml +++ b/.travis.yml @@ -1,15 +1,19 @@ sudo: false + language: python + python: - 2.7 - 3.2 - 3.3 - 3.4 - 3.5 + install: -- pip install . 
"coverage<4" coveralls flake8 flake8-import-order nose +- pip install "coverage<4" coveralls tox "virtualenv<14.0.0" + script: -- flake8 -- nosetests +- tox -e check-manifest,flake8,py + after_success: - coveralls diff --git a/MANIFEST.in b/MANIFEST.in index b16d610..de1c916 100644 --- a/MANIFEST.in +++ b/MANIFEST.in @@ -2,6 +2,7 @@ include CHANGES.rst include LICENSE include MANIFEST.in include README.rst +include tox.ini recursive-include docs * prune docs/_build diff --git a/cachetools/abc.py b/cachetools/abc.py new file mode 100644 index 0000000..41ad736 --- /dev/null +++ b/cachetools/abc.py @@ -0,0 +1,48 @@ +from __future__ import absolute_import + +import collections + +from abc import abstractmethod + + +class DefaultMapping(collections.MutableMapping): + + __slots__ = () + + @abstractmethod + def __contains__(self, key): # pragma: nocover + return False + + @abstractmethod + def __getitem__(self, key): # pragma: nocover + if hasattr(self.__class__, '__missing__'): + return self.__class__.__missing__(self, key) + else: + raise KeyError(key) + + def get(self, key, default=None): + if key in self: + return self[key] + else: + return default + + __marker = object() + + def pop(self, key, default=__marker): + if key in self: + value = self[key] + del self[key] + elif default is self.__marker: + raise KeyError(key) + else: + value = default + return value + + def setdefault(self, key, default=None): + if key in self: + value = self[key] + else: + self[key] = value = default + return value + +DefaultMapping.register(dict) diff --git a/cachetools/cache.py b/cachetools/cache.py index fbebbbe..6eb2ed9 100644 --- a/cachetools/cache.py +++ b/cachetools/cache.py @@ -1,4 +1,4 @@ -import collections +from .abc import DefaultMapping class _DefaultSize(object): @@ -12,7 +12,7 @@ class _DefaultSize(object): return 1 -class Cache(collections.MutableMapping): +class Cache(DefaultMapping): """Mutable mapping to serve as a simple cache or cache base class.""" __size = 
_DefaultSize() @@ -30,7 +30,7 @@ class Cache(collections.MutableMapping): def __repr__(self): return '%s(%r, maxsize=%d, currsize=%d)' % ( self.__class__.__name__, - list(self.items()), + list(self.__data.items()), self.__maxsize, self.__currsize, ) @@ -97,28 +97,3 @@ class Cache(collections.MutableMapping): def getsizeof(self, value): """Return the size of a cache element's value.""" return self.__getsizeof(value) - - # collections.MutableMapping mixin methods do not handle __missing__ - - def get(self, key, default=None): - if key in self: - return self[key] - else: - return default - - __marker = object() - - def pop(self, key, default=__marker): - if key in self: - value = self[key] - del self[key] - return value - elif default is self.__marker: - raise KeyError(key) - else: - return default - - def setdefault(self, key, default=None): - if key not in self: - self[key] = default - return self[key] diff --git a/cachetools/lfu.py b/cachetools/lfu.py index d163cba..62306a5 100644 --- a/cachetools/lfu.py +++ b/cachetools/lfu.py @@ -27,7 +27,8 @@ class LFUCache(Cache): def popitem(self): """Remove and return the `(key, value)` pair least frequently used.""" try: - key = min(self.__counter.items(), key=operator.itemgetter(1))[0] + item = min(self.__counter.items(), key=operator.itemgetter(1)) except ValueError: - raise KeyError('cache is empty') - return key, self.pop(key) + raise KeyError('%s is empty' % self.__class__.__name__) + else: + return (item[0], self.pop(item[0])) diff --git a/cachetools/lru.py b/cachetools/lru.py index f18f0b2..74db909 100644 --- a/cachetools/lru.py +++ b/cachetools/lru.py @@ -5,19 +5,11 @@ class _Link(object): __slots__ = 'key', 'prev', 'next' - def __getstate__(self): - if hasattr(self, 'key'): - return (self.key,) - else: - return None - - def __setstate__(self, state): - self.key, = state + def __init__(self, key=None): + self.key = key - def insert(self, next): - self.next = next - self.prev = prev = next.prev - prev.next = next.prev 
= self + def __reduce__(self): + return (_Link, (self.key,)) def unlink(self): next = self.next @@ -35,20 +27,13 @@ class LRUCache(Cache): root.prev = root.next = root self.__links = {} - def __repr__(self, cache_getitem=Cache.__getitem__): - # prevent item reordering - return '%s(%r, maxsize=%d, currsize=%d)' % ( - self.__class__.__name__, - [(key, cache_getitem(self, key)) for key in self], - self.maxsize, - self.currsize, - ) - def __getitem__(self, key, cache_getitem=Cache.__getitem__): value = cache_getitem(self, key) link = self.__links[key] link.unlink() - link.insert(self.__root) + link.next = root = self.__root + link.prev = prev = root.prev + prev.next = root.prev = link return value def __setitem__(self, key, value, cache_setitem=Cache.__setitem__): @@ -56,17 +41,17 @@ class LRUCache(Cache): try: link = self.__links[key] except KeyError: - link = self.__links[key] = _Link() + self.__links[key] = link = _Link(key) else: link.unlink() - link.key = key - link.insert(self.__root) + link.next = root = self.__root + link.prev = prev = root.prev + prev.next = root.prev = link def __delitem__(self, key, cache_delitem=Cache.__delitem__): cache_delitem(self, key) - links = self.__links - links[key].unlink() - del links[key] + link = self.__links.pop(key) + link.unlink() def __getstate__(self): state = self.__dict__.copy() @@ -92,5 +77,5 @@ class LRUCache(Cache): link = root.next if link is root: raise KeyError('%s is empty' % self.__class__.__name__) - key = link.key - return (key, self.pop(key)) + else: + return (link.key, self.pop(link.key)) diff --git a/cachetools/rr.py b/cachetools/rr.py index 143223b..c82919e 100644 --- a/cachetools/rr.py +++ b/cachetools/rr.py @@ -11,15 +11,16 @@ class RRCache(Cache): Cache.__init__(self, maxsize, missing, getsizeof) self.__choice = choice + @property + def choice(self): + """The `choice` function used by the cache.""" + return self.__choice + def popitem(self): """Remove and return a random `(key, value)` pair.""" try: key 
= self.__choice(list(self)) except IndexError: - raise KeyError('cache is empty') - return (key, self.pop(key)) - - @property - def choice(self): - """The `choice` function used by the cache.""" - return self.__choice + raise KeyError('%s is empty' % self.__class__.__name__) + else: + return (key, self.pop(key)) diff --git a/cachetools/ttl.py b/cachetools/ttl.py index 57c8f9c..d06a939 100644 --- a/cachetools/ttl.py +++ b/cachetools/ttl.py @@ -52,7 +52,7 @@ class _Link(object): self.unlink_ttl() -class _NestedTimer(object): +class _Timer(object): def __init__(self, timer): self.__timer = timer @@ -93,7 +93,7 @@ class TTLCache(Cache): root.ttl_prev = root.ttl_next = root root.lru_prev = root.lru_next = root self.__links = {} - self.__timer = _NestedTimer(timer) + self.__timer = _Timer(timer) self.__ttl = ttl def __repr__(self, cache_getitem=Cache.__getitem__): @@ -138,18 +138,16 @@ class TTLCache(Cache): with self.__timer as time: self.expire(time) cache_delitem(self, key) - links = self.__links - links[key].unlink() - del links[key] + link = self.__links.pop(key) + link.unlink() def __contains__(self, key): - with self.__timer as time: - if key not in self.__links: - return False - elif self.__links[key].expire < time: - return False - else: - return True + try: + link = self.__links[key] + except KeyError: + return False + else: + return not (link.expire < self.__timer()) def __iter__(self): timer = self.__timer @@ -237,8 +235,8 @@ class TTLCache(Cache): link = root.lru_next if link is root: raise KeyError('%s is empty' % self.__class__.__name__) - key = link.key - return (key, self.pop(key)) + else: + return (link.key, self.pop(link.key)) # mixin methods diff --git a/docs/conf.py b/docs/conf.py index d675a0f..d51f10b 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -1,245 +1,20 @@ -# -*- coding: utf-8 -*- -# -# cachetools documentation build configuration file, created by -# sphinx-quickstart on Mon Feb 10 09:15:34 2014. 
-# -# This file is execfile()d with the current directory set to its containing dir. -# -# Note that not all possible configuration values are present in this -# autogenerated file. -# -# All configuration values have a default; values that are commented out -# serve to show the default. - -import sys -import os - -# If extensions (or modules to document with autodoc) are in another directory, -# add these directories to sys.path here. If the directory is relative to the -# documentation root, use os.path.abspath to make it absolute, like shown here. -#sys.path.insert(0, os.path.abspath('.')) -sys.path.insert(0, os.path.abspath('..')) -from cachetools import __version__ - -# -- General configuration ----------------------------------------------------- - -# If your documentation needs a minimal Sphinx version, state it here. -#needs_sphinx = '1.0' - -# Add any Sphinx extension module names here, as strings. They can be extensions -# coming with Sphinx (named 'sphinx.ext.*') or your custom ones. -extensions = ['sphinx.ext.autodoc', 'sphinx.ext.doctest', 'sphinx.ext.todo', 'sphinx.ext.coverage'] - -# Add any paths that contain templates here, relative to this directory. -templates_path = ['_templates'] - -# The suffix of source filenames. -source_suffix = '.rst' - -# The encoding of source files. -#source_encoding = 'utf-8-sig' - -# The master toctree document. -master_doc = 'index' - -# General information about the project. -project = u'cachetools' -copyright = u'2014, 2015 Thomas Kemmer' - -# The version info for the project you're documenting, acts as replacement for -# |version| and |release|, also used in various other places throughout the -# built documents. -# -# The short X.Y version. -version = __version__ -# The full version, including alpha/beta/rc tags. 
+def get_version(filename): + from re import findall + with open(filename) as f: + metadata = dict(findall(r"__([a-z]+)__ = '([^']+)'", f.read())) + return metadata['version'] + +project = 'cachetools' +copyright = '2014-2016 Thomas Kemmer' +version = get_version(b'../cachetools/__init__.py') release = version -# The language for content autogenerated by Sphinx. Refer to documentation -# for a list of supported languages. -#language = None - -# There are two options for replacing |today|: either, you set today to some -# non-false value, then it is used: -#today = '' -# Else, today_fmt is used as the format for a strftime call. -#today_fmt = '%B %d, %Y' - -# List of patterns, relative to source directory, that match files and -# directories to ignore when looking for source files. +extensions = [ + 'sphinx.ext.autodoc', + 'sphinx.ext.coverage', + 'sphinx.ext.doctest', + 'sphinx.ext.todo' +] exclude_patterns = ['_build'] - -# The reST default role (used for this markup: `text`) to use for all documents. -#default_role = None - -# If true, '()' will be appended to :func: etc. cross-reference text. -#add_function_parentheses = True - -# If true, the current module name will be prepended to all description -# unit titles (such as .. function::). -#add_module_names = True - -# If true, sectionauthor and moduleauthor directives will be shown in the -# output. They are ignored by default. -#show_authors = False - -# The name of the Pygments (syntax highlighting) style to use. -pygments_style = 'sphinx' - -# A list of ignored prefixes for module index sorting. -#modindex_common_prefix = [] - - -# -- Options for HTML output --------------------------------------------------- - -# The theme to use for HTML and HTML Help pages. See the documentation for -# a list of builtin themes. +master_doc = 'index' html_theme = 'default' - -# Theme options are theme-specific and customize the look and feel of a theme -# further. 
For a list of options available for each theme, see the -# documentation. -#html_theme_options = {} - -# Add any paths that contain custom themes here, relative to this directory. -#html_theme_path = [] - -# The name for this set of Sphinx documents. If None, it defaults to -# " v documentation". -#html_title = None - -# A shorter title for the navigation bar. Default is the same as html_title. -#html_short_title = None - -# The name of an image file (relative to this directory) to place at the top -# of the sidebar. -#html_logo = None - -# The name of an image file (within the static path) to use as favicon of the -# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 -# pixels large. -#html_favicon = None - -# Add any paths that contain custom static files (such as style sheets) here, -# relative to this directory. They are copied after the builtin static files, -# so a file named "default.css" will overwrite the builtin "default.css". -#html_static_path = ['_static'] - -# If not '', a 'Last updated on:' timestamp is inserted at every page bottom, -# using the given strftime format. -#html_last_updated_fmt = '%b %d, %Y' - -# If true, SmartyPants will be used to convert quotes and dashes to -# typographically correct entities. -#html_use_smartypants = True - -# Custom sidebar templates, maps document names to template names. -#html_sidebars = {} - -# Additional templates that should be rendered to pages, maps page names to -# template names. -#html_additional_pages = {} - -# If false, no module index is generated. -#html_domain_indices = True - -# If false, no index is generated. -#html_use_index = True - -# If true, the index is split into individual pages for each letter. -#html_split_index = False - -# If true, links to the reST sources are added to the pages. -#html_show_sourcelink = True - -# If true, "Created using Sphinx" is shown in the HTML footer. Default is True. -#html_show_sphinx = True - -# If true, "(C) Copyright ..." 
is shown in the HTML footer. Default is True. -#html_show_copyright = True - -# If true, an OpenSearch description file will be output, and all pages will -# contain a tag referring to it. The value of this option must be the -# base URL from which the finished HTML is served. -#html_use_opensearch = '' - -# This is the file name suffix for HTML files (e.g. ".xhtml"). -#html_file_suffix = None - -# Output file base name for HTML help builder. -htmlhelp_basename = 'cachetoolsdoc' - - -# -- Options for LaTeX output -------------------------------------------------- - -latex_elements = { - # The paper size ('letterpaper' or 'a4paper'). - #'papersize': 'letterpaper', - - # The font size ('10pt', '11pt' or '12pt'). - #'pointsize': '10pt', - - # Additional stuff for the LaTeX preamble. - #'preamble': '', -} - -# Grouping the document tree into LaTeX files. List of tuples -# (source start file, target name, title, author, documentclass [howto/manual]). -latex_documents = [ - ('index', 'cachetools.tex', u'cachetools Documentation', - u'Thomas Kemmer', 'manual'), -] - -# The name of an image file (relative to this directory) to place at the top of -# the title page. -#latex_logo = None - -# For "manual" documents, if this is true, then toplevel headings are parts, -# not chapters. -#latex_use_parts = False - -# If true, show page references after internal links. -#latex_show_pagerefs = False - -# If true, show URL addresses after external links. -#latex_show_urls = False - -# Documents to append as an appendix to all manuals. -#latex_appendices = [] - -# If false, no module index is generated. -#latex_domain_indices = True - - -# -- Options for manual page output -------------------------------------------- - -# One entry per manual page. List of tuples -# (source start file, name, description, authors, manual section). -man_pages = [ - ('index', 'cachetools', u'cachetools Documentation', - [u'Thomas Kemmer'], 1) -] - -# If true, show URL addresses after external links. 
-#man_show_urls = False - - -# -- Options for Texinfo output ------------------------------------------------ - -# Grouping the document tree into Texinfo files. List of tuples -# (source start file, target name, title, author, -# dir menu entry, description, category) -texinfo_documents = [ - ('index', 'cachetools', u'cachetools Documentation', - u'Thomas Kemmer', 'cachetools', 'One line description of project.', - 'Miscellaneous'), -] - -# Documents to append as an appendix to all manuals. -#texinfo_appendices = [] - -# If false, no module index is generated. -#texinfo_domain_indices = True - -# How to display URL addresses: 'footnote', 'no', or 'inline'. -#texinfo_show_urls = 'footnote' diff --git a/setup.cfg b/setup.cfg index 8d5530a..d62e57e 100644 --- a/setup.cfg +++ b/setup.cfg @@ -1,12 +1,8 @@ -[bdist_wheel] -universal = 1 - [flake8] -exclude = .git,build,docs,setup.py +exclude = .git,.tox -[nosetests] -with-coverage = 1 -cover-package = cachetools +[wheel] +universal = 1 [build_sphinx] source-dir = docs/ diff --git a/setup.py b/setup.py index a683e67..b065d87 100644 --- a/setup.py +++ b/setup.py @@ -1,23 +1,25 @@ -import os.path, codecs, re +from __future__ import unicode_literals -from setuptools import setup +from setuptools import find_packages, setup -with codecs.open(os.path.join(os.path.dirname(__file__), 'cachetools', '__init__.py'), - encoding='utf8') as f: - metadata = dict(re.findall(r"__([a-z]+)__ = '([^']+)", f.read())) - +def get_version(filename): + from re import findall + with open(filename) as f: + metadata = dict(findall("__([a-z]+)__ = '([^']+)'", f.read())) + return metadata['version'] setup( name='cachetools', - version=metadata['version'], - author='Thomas Kemmer', - author_email='tkemmer@computer.org', + version=get_version('cachetools/__init__.py'), url='https://github.com/tkem/cachetools', license='MIT', + author='Thomas Kemmer', + author_email='tkemmer@computer.org', description='Extensible memoizing collections and decorators', 
long_description=open('README.rst').read(), keywords='cache caching memoize memoizing memoization LRU LFU TTL', + packages=find_packages(exclude=['tests', 'tests.*']), classifiers=[ 'Development Status :: 5 - Production/Stable', 'Environment :: Other Environment', @@ -33,7 +35,5 @@ setup( 'Programming Language :: Python :: 3.4', 'Programming Language :: Python :: 3.5', 'Topic :: Software Development :: Libraries :: Python Modules' - ], - packages=['cachetools'], - test_suite='tests' + ] ) diff --git a/tox.ini b/tox.ini new file mode 100644 index 0000000..c98f2f6 --- /dev/null +++ b/tox.ini @@ -0,0 +1,32 @@ +[tox] +envlist = check-manifest,docs,flake8,py + +[testenv] +# coverage 4.0 drops Python 3.2 compatibility +deps = + coverage<4 + pytest + pytest-cov +commands = + py.test --basetemp={envtmpdir} --cov=cachetools {posargs} + +[testenv:check-manifest] +deps = + check-manifest +commands = + check-manifest +skip_install = true + +[testenv:docs] +deps = + sphinx +commands = + sphinx-build -W -b html -d {envtmpdir}/doctrees docs {envtmpdir}/html + +[testenv:flake8] +deps = + flake8 + flake8-import-order +commands = + flake8 +skip_install = true -- cgit v1.2.3 From 668fdb331d1169c0e37bda699b3f0fbb9bbadc79 Mon Sep 17 00:00:00 2001 From: Thomas Kemmer Date: Tue, 29 Mar 2016 23:19:26 +0200 Subject: Fix #66: Use Counter.most_common() for LFUCache.popitem(). 
--- cachetools/lfu.py | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/cachetools/lfu.py b/cachetools/lfu.py index 62306a5..160f537 100644 --- a/cachetools/lfu.py +++ b/cachetools/lfu.py @@ -1,5 +1,4 @@ import collections -import operator from .cache import Cache @@ -13,12 +12,12 @@ class LFUCache(Cache): def __getitem__(self, key, cache_getitem=Cache.__getitem__): value = cache_getitem(self, key) - self.__counter[key] += 1 + self.__counter[key] -= 1 return value def __setitem__(self, key, value, cache_setitem=Cache.__setitem__): cache_setitem(self, key, value) - self.__counter[key] += 1 + self.__counter[key] -= 1 def __delitem__(self, key, cache_delitem=Cache.__delitem__): cache_delitem(self, key) @@ -27,8 +26,8 @@ class LFUCache(Cache): def popitem(self): """Remove and return the `(key, value)` pair least frequently used.""" try: - item = min(self.__counter.items(), key=operator.itemgetter(1)) + (key, _), = self.__counter.most_common(1) except ValueError: raise KeyError('%s is empty' % self.__class__.__name__) else: - return (item[0], self.pop(item[0])) + return (key, self.pop(key)) -- cgit v1.2.3 From b445d7a878629bd998d05f9298ed573d631ba73b Mon Sep 17 00:00:00 2001 From: Thomas Kemmer Date: Tue, 29 Mar 2016 15:39:16 +0200 Subject: Fix #64: Reimplement LRUCache and TTLCache using collections.OrderedDict. 
--- cachetools/lru.py | 81 +++++++++-------------------- cachetools/ttl.py | 151 +++++++++++++++++++----------------------------------- 2 files changed, 76 insertions(+), 156 deletions(-) diff --git a/cachetools/lru.py b/cachetools/lru.py index 74db909..525abd8 100644 --- a/cachetools/lru.py +++ b/cachetools/lru.py @@ -1,21 +1,6 @@ -from .cache import Cache - - -class _Link(object): - - __slots__ = 'key', 'prev', 'next' - - def __init__(self, key=None): - self.key = key - - def __reduce__(self): - return (_Link, (self.key,)) +import collections - def unlink(self): - next = self.next - prev = self.prev - prev.next = next - next.prev = prev +from .cache import Cache class LRUCache(Cache): @@ -23,59 +8,39 @@ class LRUCache(Cache): def __init__(self, maxsize, missing=None, getsizeof=None): Cache.__init__(self, maxsize, missing, getsizeof) - self.__root = root = _Link() - root.prev = root.next = root - self.__links = {} + self.__order = collections.OrderedDict() def __getitem__(self, key, cache_getitem=Cache.__getitem__): value = cache_getitem(self, key) - link = self.__links[key] - link.unlink() - link.next = root = self.__root - link.prev = prev = root.prev - prev.next = root.prev = link + self.__update(key) return value def __setitem__(self, key, value, cache_setitem=Cache.__setitem__): cache_setitem(self, key, value) - try: - link = self.__links[key] - except KeyError: - self.__links[key] = link = _Link(key) - else: - link.unlink() - link.next = root = self.__root - link.prev = prev = root.prev - prev.next = root.prev = link + self.__update(key) def __delitem__(self, key, cache_delitem=Cache.__delitem__): cache_delitem(self, key) - link = self.__links.pop(key) - link.unlink() - - def __getstate__(self): - state = self.__dict__.copy() - root = self.__root - links = state['__links'] = [root] - link = root.next - while link is not root: - links.append(link) - link = link.next - return state - - def __setstate__(self, state): - links = state.pop('__links') - count = 
len(links) - for index, link in enumerate(links): - link.prev = links[index - 1] - link.next = links[(index + 1) % count] - self.__dict__.update(state) + del self.__order[key] def popitem(self): """Remove and return the `(key, value)` pair least recently used.""" - root = self.__root - link = root.next - if link is root: + try: + key = next(iter(self.__order)) + except StopIteration: raise KeyError('%s is empty' % self.__class__.__name__) else: - return (link.key, self.pop(link.key)) + return (key, self.pop(key)) + + if hasattr(collections.OrderedDict, 'move_to_end'): + def __update(self, key): + try: + self.__order.move_to_end(key) + except KeyError: + self.__order[key] = None + else: + def __update(self, key): + try: + self.__order[key] = self.__order.pop(key) + except KeyError: + self.__order[key] = None diff --git a/cachetools/ttl.py b/cachetools/ttl.py index d06a939..f68d9a8 100644 --- a/cachetools/ttl.py +++ b/cachetools/ttl.py @@ -1,3 +1,4 @@ +import collections import functools import time @@ -6,50 +7,21 @@ from .cache import Cache class _Link(object): - __slots__ = ( - 'key', 'expire', 'size', - 'ttl_prev', 'ttl_next', - 'lru_prev', 'lru_next' - ) + __slots__ = ('key', 'expire', 'size', 'next', 'prev') - def __getstate__(self): - if hasattr(self, 'key'): - return (self.key, self.expire, self.size) - else: - return None + def __init__(self, key=None, expire=None, size=None): + self.key = key + self.expire = expire + self.size = size - def __setstate__(self, state): - self.key, self.expire, self.size = state - - def insert_lru(self, next): - self.lru_next = next - self.lru_prev = prev = next.lru_prev - prev.lru_next = next.lru_prev = self - - def insert_ttl(self, next): - self.ttl_next = next - self.ttl_prev = prev = next.ttl_prev - prev.ttl_next = next.ttl_prev = self - - def insert(self, next): - self.insert_lru(next) - self.insert_ttl(next) - - def unlink_lru(self): - lru_next = self.lru_next - lru_prev = self.lru_prev - lru_prev.lru_next = lru_next - 
lru_next.lru_prev = lru_prev - - def unlink_ttl(self): - ttl_next = self.ttl_next - ttl_prev = self.ttl_prev - ttl_prev.ttl_next = ttl_next - ttl_next.ttl_prev = ttl_prev + def __reduce__(self): + return _Link, (self.key, self.expire, self.size) def unlink(self): - self.unlink_lru() - self.unlink_ttl() + next = self.next + prev = self.prev + prev.next = next + next.prev = prev class _Timer(object): @@ -58,6 +30,12 @@ class _Timer(object): self.__timer = timer self.__nesting = 0 + def __call__(self): + if self.__nesting == 0: + return self.__timer() + else: + return self.__time + def __enter__(self): if self.__nesting == 0: self.__time = self.__timer() @@ -67,21 +45,12 @@ class _Timer(object): def __exit__(self, *exc): self.__nesting -= 1 - def __call__(self): - if self.__nesting == 0: - return self.__timer() - else: - return self.__time + def __reduce__(self): + return _Timer, (self.__timer,) def __getattr__(self, name): return getattr(self.__timer, name) - def __getstate__(self): - return (self.__timer, self.__nesting) - - def __setstate__(self, state): - self.__timer, self.__nesting = state - class TTLCache(Cache): """LRU Cache implementation with per-item time-to-live (TTL) value.""" @@ -90,9 +59,8 @@ class TTLCache(Cache): getsizeof=None): Cache.__init__(self, maxsize, missing, getsizeof) self.__root = root = _Link() - root.ttl_prev = root.ttl_next = root - root.lru_prev = root.lru_next = root - self.__links = {} + root.prev = root.next = root + self.__links = collections.OrderedDict() self.__timer = _Timer(timer) self.__ttl = ttl @@ -105,34 +73,30 @@ class TTLCache(Cache): self.currsize, ) - def __getitem__(self, key, - cache_getitem=Cache.__getitem__, - cache_missing=Cache.__missing__): + def __getitem__(self, key, cache_getitem=Cache.__getitem__): with self.__timer as time: value = cache_getitem(self, key) - link = self.__links[key] + self.__links[key] = link = self.__links.pop(key) if link.expire < time: - return cache_missing(self, key) - link.unlink_lru() 
- link.insert_lru(self.__root) - return value - - def __setitem__(self, key, value, - cache_setitem=Cache.__setitem__, - cache_getsizeof=Cache.getsizeof): + return Cache.__missing__(self, key) # FIXME + else: + return value + + def __setitem__(self, key, value, cache_setitem=Cache.__setitem__): with self.__timer as time: self.expire(time) cache_setitem(self, key, value) try: link = self.__links[key] except KeyError: - link = self.__links[key] = _Link() + self.__links[key] = link = _Link(key) else: link.unlink() - link.key = key link.expire = time + self.__ttl - link.size = cache_getsizeof(self, value) - link.insert(self.__root) + link.size = Cache.getsizeof(self, value) + link.next = root = self.__root + link.prev = prev = root.prev + prev.next = root.prev = link def __delitem__(self, key, cache_delitem=Cache.__delitem__): with self.__timer as time: @@ -152,51 +116,42 @@ class TTLCache(Cache): def __iter__(self): timer = self.__timer root = self.__root - curr = root.ttl_next + curr = root.next while curr is not root: with timer as time: if not (curr.expire < time): yield curr.key - curr = curr.ttl_next + curr = curr.next def __len__(self, cache_len=Cache.__len__): root = self.__root - head = root.ttl_next + head = root.next expired = 0 with self.__timer as time: while head is not root and head.expire < time: expired += 1 - head = head.ttl_next + head = head.next return cache_len(self) - expired - def __getstate__(self): - state = self.__dict__.copy() - root = self.__root - links = state['__links'] = [(root, root)] - lru, ttl = root.lru_next, root.ttl_next - while lru is not root: - links.append((lru, ttl)) - lru = lru.lru_next - ttl = ttl.ttl_next - return state - def __setstate__(self, state): - links = state.pop('__links') - count = len(links) - for index, (lru, ttl) in enumerate(links): - lru.lru_prev, ttl.ttl_prev = links[index - 1] - lru.lru_next, ttl.ttl_next = links[(index + 1) % count] self.__dict__.update(state) + root = self.__root + root.prev = root.next 
= root + for link in sorted(self.__links.values(), key=lambda obj: obj.expire): + link.next = root + link.prev = prev = root.prev + prev.next = root.prev = link + self.expire(self.__timer()) @property def currsize(self): root = self.__root - head = root.ttl_next + head = root.next expired = 0 with self.__timer as time: while head is not root and head.expire < time: expired += head.size - head = head.ttl_next + head = head.next return super(TTLCache, self).currsize - expired @property @@ -214,13 +169,13 @@ class TTLCache(Cache): if time is None: time = self.__timer() root = self.__root - head = root.ttl_next + head = root.next links = self.__links cache_delitem = Cache.__delitem__ while head is not root and head.expire < time: cache_delitem(self, head.key) del links[head.key] - next = head.ttl_next + next = head.next head.unlink() head = next @@ -231,12 +186,12 @@ class TTLCache(Cache): """ with self.__timer as time: self.expire(time) - root = self.__root - link = root.lru_next - if link is root: + try: + key = next(iter(self.__links)) + except StopIteration: raise KeyError('%s is empty' % self.__class__.__name__) else: - return (link.key, self.pop(link.key)) + return (key, self.pop(key)) # mixin methods -- cgit v1.2.3 From b1d836cfc080e59e53f2a73d36acaa7e9b1ad6c0 Mon Sep 17 00:00:00 2001 From: Thomas Kemmer Date: Thu, 31 Mar 2016 09:18:48 +0200 Subject: Fix #62: Improve TTLCache expiration handling. 
--- cachetools/ttl.py | 69 +++++++++++++++++++++---------------------------------- 1 file changed, 26 insertions(+), 43 deletions(-) diff --git a/cachetools/ttl.py b/cachetools/ttl.py index f68d9a8..c341f95 100644 --- a/cachetools/ttl.py +++ b/cachetools/ttl.py @@ -7,15 +7,14 @@ from .cache import Cache class _Link(object): - __slots__ = ('key', 'expire', 'size', 'next', 'prev') + __slots__ = ('key', 'expire', 'next', 'prev') - def __init__(self, key=None, expire=None, size=None): + def __init__(self, key=None, expire=None): self.key = key self.expire = expire - self.size = size def __reduce__(self): - return _Link, (self.key, self.expire, self.size) + return _Link, (self.key, self.expire) def unlink(self): next = self.next @@ -38,9 +37,11 @@ class _Timer(object): def __enter__(self): if self.__nesting == 0: - self.__time = self.__timer() + self.__time = time = self.__timer() + else: + time = self.__time self.__nesting += 1 - return self.__time + return time def __exit__(self, *exc): self.__nesting -= 1 @@ -64,14 +65,13 @@ class TTLCache(Cache): self.__timer = _Timer(timer) self.__ttl = ttl - def __repr__(self, cache_getitem=Cache.__getitem__): - # prevent item reordering/expiration - return '%s(%r, maxsize=%d, currsize=%d)' % ( - self.__class__.__name__, - [(key, cache_getitem(self, key)) for key in self], - self.maxsize, - self.currsize, - ) + def __contains__(self, key): + try: + link = self.__links[key] + except KeyError: + return False + else: + return not (link.expire < self.__timer()) def __getitem__(self, key, cache_getitem=Cache.__getitem__): with self.__timer as time: @@ -93,25 +93,15 @@ class TTLCache(Cache): else: link.unlink() link.expire = time + self.__ttl - link.size = Cache.getsizeof(self, value) - link.next = root = self.__root - link.prev = prev = root.prev - prev.next = root.prev = link + link.next = root = self.__root + link.prev = prev = root.prev + prev.next = root.prev = link def __delitem__(self, key, cache_delitem=Cache.__delitem__): with 
self.__timer as time: self.expire(time) cache_delitem(self, key) - link = self.__links.pop(key) - link.unlink() - - def __contains__(self, key): - try: - link = self.__links[key] - except KeyError: - return False - else: - return not (link.expire < self.__timer()) + self.__links.pop(key).unlink() def __iter__(self): timer = self.__timer @@ -124,14 +114,13 @@ class TTLCache(Cache): curr = curr.next def __len__(self, cache_len=Cache.__len__): - root = self.__root - head = root.next - expired = 0 + self.expire(time=self.__timer()) + return cache_len(self) + + def __repr__(self, cache_repr=Cache.__repr__): with self.__timer as time: - while head is not root and head.expire < time: - expired += 1 - head = head.next - return cache_len(self) - expired + self.expire(time) + return cache_repr(self) def __setstate__(self, state): self.__dict__.update(state) @@ -145,14 +134,8 @@ class TTLCache(Cache): @property def currsize(self): - root = self.__root - head = root.next - expired = 0 - with self.__timer as time: - while head is not root and head.expire < time: - expired += head.size - head = head.next - return super(TTLCache, self).currsize - expired + self.expire(time=self.__timer()) + return super(TTLCache, self).currsize @property def timer(self): -- cgit v1.2.3 From d4da6633d285a4a3b674fcc3f7dea3b50e9c2af3 Mon Sep 17 00:00:00 2001 From: Thomas Kemmer Date: Thu, 31 Mar 2016 14:49:13 +0200 Subject: Fix #60: Call __missing__() of derived classes. 
--- cachetools/ttl.py | 96 +++++++++++++++++++++++++++++++++++-------------------- tests/test_ttl.py | 18 +++++++++++ 2 files changed, 79 insertions(+), 35 deletions(-) diff --git a/cachetools/ttl.py b/cachetools/ttl.py index c341f95..e19c788 100644 --- a/cachetools/ttl.py +++ b/cachetools/ttl.py @@ -1,5 +1,4 @@ import collections -import functools import time from .cache import Cache @@ -67,32 +66,35 @@ class TTLCache(Cache): def __contains__(self, key): try: - link = self.__links[key] + link = self.__links[key] # no reordering except KeyError: return False else: return not (link.expire < self.__timer()) def __getitem__(self, key, cache_getitem=Cache.__getitem__): - with self.__timer as time: - value = cache_getitem(self, key) - self.__links[key] = link = self.__links.pop(key) - if link.expire < time: - return Cache.__missing__(self, key) # FIXME - else: - return value + try: + link = self.__getlink(key) + except KeyError: + missing = True + else: + missing = link.expire < self.__timer() + if missing: + return self.__missing__(key) + else: + return cache_getitem(self, key) def __setitem__(self, key, value, cache_setitem=Cache.__setitem__): with self.__timer as time: self.expire(time) cache_setitem(self, key, value) - try: - link = self.__links[key] - except KeyError: - self.__links[key] = link = _Link(key) - else: - link.unlink() - link.expire = time + self.__ttl + try: + link = self.__getlink(key) + except KeyError: + self.__links[key] = link = _Link(key) + else: + link.unlink() + link.expire = time + self.__ttl link.next = root = self.__root link.prev = prev = root.prev prev.next = root.prev = link @@ -101,21 +103,28 @@ class TTLCache(Cache): with self.__timer as time: self.expire(time) cache_delitem(self, key) - self.__links.pop(key).unlink() + link = self.__links.pop(key) + link.unlink() def __iter__(self): - timer = self.__timer root = self.__root curr = root.next while curr is not root: - with timer as time: + # "freeze" time for iterator access + with 
self.__timer as time: if not (curr.expire < time): yield curr.key curr = curr.next - def __len__(self, cache_len=Cache.__len__): - self.expire(time=self.__timer()) - return cache_len(self) + def __len__(self): + root = self.__root + curr = root.next + time = self.__timer() + count = len(self.__links) + while curr is not root and curr.expire < time: + count -= 1 + curr = curr.next + return count def __repr__(self, cache_repr=Cache.__repr__): with self.__timer as time: @@ -134,8 +143,9 @@ class TTLCache(Cache): @property def currsize(self): - self.expire(time=self.__timer()) - return super(TTLCache, self).currsize + with self.__timer as time: + self.expire(time) + return super(TTLCache, self).currsize @property def timer(self): @@ -162,6 +172,23 @@ class TTLCache(Cache): head.unlink() head = next + def clear(self): + with self.__timer as time: + self.expire(time) + Cache.clear(self) + + def get(self, *args, **kwargs): + with self.__timer: + return Cache.get(self, *args, **kwargs) + + def pop(self, *args, **kwargs): + with self.__timer: + return Cache.pop(self, *args, **kwargs) + + def setdefault(self, *args, **kwargs): + with self.__timer: + return Cache.setdefault(self, *args, **kwargs) + def popitem(self): """Remove and return the `(key, value)` pair least recently used that has not already expired. 
@@ -176,14 +203,13 @@ class TTLCache(Cache): else: return (key, self.pop(key)) - # mixin methods - - def __nested(method): - def wrapper(self, *args, **kwargs): - with self.__timer: - return method(self, *args, **kwargs) - return functools.update_wrapper(wrapper, method) - - get = __nested(Cache.get) - pop = __nested(Cache.pop) - setdefault = __nested(Cache.setdefault) + if hasattr(collections.OrderedDict, 'move_to_end'): + def __getlink(self, key): + value = self.__links[key] + self.__links.move_to_end(key) + return value + else: + def __getlink(self, key): + value = self.__links.pop(key) + self.__links[key] = value + return value diff --git a/tests/test_ttl.py b/tests/test_ttl.py index 005ab31..115545e 100644 --- a/tests/test_ttl.py +++ b/tests/test_ttl.py @@ -175,6 +175,24 @@ class TTLCacheTest(unittest.TestCase, CacheTestMixin): self.assertEqual(1, cache.pop(1)) cache[1] = 1 self.assertEqual(1, cache.setdefault(1)) + cache[1] = 1 + cache.clear() + self.assertEqual(0, len(cache)) + self.assertEqual(0, cache.currsize) + + def test_missing(self): + class DefaultTTLCache(TTLCache): + def __missing__(self, key): + self[key] = key + return key + + cache = DefaultTTLCache(maxsize=1, ttl=1, timer=Timer()) + self.assertEqual(1, cache[1]) + self.assertIn(1, cache) + self.assertNotIn(2, cache) + self.assertEqual(2, cache[2]) + self.assertNotIn(1, cache) + self.assertIn(2, cache) def test_tuple_key(self): cache = self.cache(maxsize=1, ttl=0) -- cgit v1.2.3 From d20f70bd188e490f24d093675dd182786a29ec2d Mon Sep 17 00:00:00 2001 From: Thomas Kemmer Date: Fri, 1 Apr 2016 10:26:14 +0200 Subject: Fix #67: Do not call TTLCache.expire() from TTLCache.__delitem__(). 
--- cachetools/ttl.py | 36 ++++++++++++++++++------------------ tests/test_ttl.py | 15 ++------------- 2 files changed, 20 insertions(+), 31 deletions(-) diff --git a/cachetools/ttl.py b/cachetools/ttl.py index e19c788..04e9d85 100644 --- a/cachetools/ttl.py +++ b/cachetools/ttl.py @@ -76,10 +76,10 @@ class TTLCache(Cache): try: link = self.__getlink(key) except KeyError: - missing = True + expired = False else: - missing = link.expire < self.__timer() - if missing: + expired = link.expire < self.__timer() + if expired: return self.__missing__(key) else: return cache_getitem(self, key) @@ -100,11 +100,11 @@ class TTLCache(Cache): prev.next = root.prev = link def __delitem__(self, key, cache_delitem=Cache.__delitem__): - with self.__timer as time: - self.expire(time) - cache_delitem(self, key) + cache_delitem(self, key) link = self.__links.pop(key) link.unlink() + if link.expire < self.__timer(): + raise KeyError(key) def __iter__(self): root = self.__root @@ -126,11 +126,6 @@ class TTLCache(Cache): curr = curr.next return count - def __repr__(self, cache_repr=Cache.__repr__): - with self.__timer as time: - self.expire(time) - return cache_repr(self) - def __setstate__(self, state): self.__dict__.update(state) root = self.__root @@ -141,6 +136,11 @@ class TTLCache(Cache): prev.next = root.prev = link self.expire(self.__timer()) + def __repr__(self, cache_repr=Cache.__repr__): + with self.__timer as time: + self.expire(time) + return cache_repr(self) + @property def currsize(self): with self.__timer as time: @@ -162,15 +162,15 @@ class TTLCache(Cache): if time is None: time = self.__timer() root = self.__root - head = root.next + curr = root.next links = self.__links cache_delitem = Cache.__delitem__ - while head is not root and head.expire < time: - cache_delitem(self, head.key) - del links[head.key] - next = head.next - head.unlink() - head = next + while curr is not root and curr.expire < time: + cache_delitem(self, curr.key) + del links[curr.key] + next = 
curr.next + curr.unlink() + curr = next def clear(self): with self.__timer as time: diff --git a/tests/test_ttl.py b/tests/test_ttl.py index 115545e..0927f5f 100644 --- a/tests/test_ttl.py +++ b/tests/test_ttl.py @@ -61,33 +61,28 @@ class TTLCacheTest(unittest.TestCase, CacheTestMixin): cache[1] = 1 self.assertEqual({1}, set(cache)) self.assertEqual(1, len(cache)) - self.assertEqual(1, cache.currsize) self.assertEqual(1, cache[1]) cache.timer.tick() self.assertEqual({1}, set(cache)) self.assertEqual(1, len(cache)) - self.assertEqual(1, cache.currsize) self.assertEqual(1, cache[1]) cache[2] = 2 self.assertEqual({1, 2}, set(cache)) self.assertEqual(2, len(cache)) - self.assertEqual(2, cache.currsize) self.assertEqual(1, cache[1]) self.assertEqual(2, cache[2]) cache.timer.tick() self.assertEqual({2}, set(cache)) self.assertEqual(1, len(cache)) - self.assertEqual(1, cache.currsize) self.assertNotIn(1, cache) self.assertEqual(2, cache[2]) cache[3] = 3 self.assertEqual({2, 3}, set(cache)) self.assertEqual(2, len(cache)) - self.assertEqual(2, cache.currsize) self.assertNotIn(1, cache) self.assertEqual(2, cache[2]) self.assertEqual(3, cache[3]) @@ -95,7 +90,6 @@ class TTLCacheTest(unittest.TestCase, CacheTestMixin): cache.timer.tick() self.assertEqual({3}, set(cache)) self.assertEqual(1, len(cache)) - self.assertEqual(1, cache.currsize) self.assertNotIn(1, cache) self.assertNotIn(2, cache) self.assertEqual(3, cache[3]) @@ -103,7 +97,6 @@ class TTLCacheTest(unittest.TestCase, CacheTestMixin): cache.timer.tick() self.assertEqual(set(), set(cache)) self.assertEqual(0, len(cache)) - self.assertEqual(0, cache.currsize) self.assertNotIn(1, cache) self.assertNotIn(2, cache) self.assertNotIn(3, cache) @@ -112,6 +105,8 @@ class TTLCacheTest(unittest.TestCase, CacheTestMixin): del cache[1] with self.assertRaises(KeyError): cache.pop(2) + with self.assertRaises(KeyError): + del cache[3] def test_expire(self): cache = self.cache(maxsize=3, ttl=2) @@ -128,7 +123,6 @@ class 
TTLCacheTest(unittest.TestCase, CacheTestMixin): self.assertEqual({1, 2, 3}, set(cache)) self.assertEqual(3, len(cache)) - self.assertEqual(3, cache.currsize) self.assertEqual(1, cache[1]) self.assertEqual(2, cache[2]) self.assertEqual(3, cache[3]) @@ -136,7 +130,6 @@ class TTLCacheTest(unittest.TestCase, CacheTestMixin): cache.expire() self.assertEqual({1, 2, 3}, set(cache)) self.assertEqual(3, len(cache)) - self.assertEqual(3, cache.currsize) self.assertEqual(1, cache[1]) self.assertEqual(2, cache[2]) self.assertEqual(3, cache[3]) @@ -144,7 +137,6 @@ class TTLCacheTest(unittest.TestCase, CacheTestMixin): cache.expire(3) self.assertEqual({2, 3}, set(cache)) self.assertEqual(2, len(cache)) - self.assertEqual(2, cache.currsize) self.assertNotIn(1, cache) self.assertEqual(2, cache[2]) self.assertEqual(3, cache[3]) @@ -152,7 +144,6 @@ class TTLCacheTest(unittest.TestCase, CacheTestMixin): cache.expire(4) self.assertEqual({3}, set(cache)) self.assertEqual(1, len(cache)) - self.assertEqual(1, cache.currsize) self.assertNotIn(1, cache) self.assertNotIn(2, cache) self.assertEqual(3, cache[3]) @@ -160,7 +151,6 @@ class TTLCacheTest(unittest.TestCase, CacheTestMixin): cache.expire(5) self.assertEqual(set(), set(cache)) self.assertEqual(0, len(cache)) - self.assertEqual(0, cache.currsize) self.assertNotIn(1, cache) self.assertNotIn(2, cache) self.assertNotIn(3, cache) @@ -178,7 +168,6 @@ class TTLCacheTest(unittest.TestCase, CacheTestMixin): cache[1] = 1 cache.clear() self.assertEqual(0, len(cache)) - self.assertEqual(0, cache.currsize) def test_missing(self): class DefaultTTLCache(TTLCache): -- cgit v1.2.3 From 977e1c4f194cfd3c23679d2c32f645aec6d27da0 Mon Sep 17 00:00:00 2001 From: Thomas Kemmer Date: Fri, 1 Apr 2016 11:48:29 +0200 Subject: Fix #63: Handle ValueError in Cache.__missing__(). 
--- cachetools/cache.py | 5 ++++- tests/__init__.py | 12 ++++++++++++ 2 files changed, 16 insertions(+), 1 deletion(-) diff --git a/cachetools/cache.py b/cachetools/cache.py index 6eb2ed9..409a6b4 100644 --- a/cachetools/cache.py +++ b/cachetools/cache.py @@ -67,7 +67,10 @@ class Cache(DefaultMapping): def __missing__(self, key): value = self.__missing(key) - self.__setitem__(key, value) + try: + self.__setitem__(key, value) + except ValueError: + pass # value too large return value def __iter__(self): diff --git a/tests/__init__.py b/tests/__init__.py index 80f6a91..401dd42 100644 --- a/tests/__init__.py +++ b/tests/__init__.py @@ -157,6 +157,18 @@ class CacheTestMixin(object): self.assertTrue(1 in cache or 2 in cache) self.assertTrue(1 not in cache or 2 not in cache) + cache = self.cache(maxsize=2, missing=lambda x: x, + getsizeof=lambda x: x) + self.assertEqual(1, cache[1]) + self.assertIn(1, cache) + self.assertEqual(2, cache[2]) + self.assertNotIn(1, cache) + self.assertIn(2, cache) + self.assertEqual(3, cache[3]) + self.assertNotIn(1, cache) + self.assertIn(2, cache) + self.assertNotIn(3, cache) + def test_cache_getsizeof(self): cache = self.cache(maxsize=3, getsizeof=lambda x: x) self.assertEqual(3, cache.maxsize) -- cgit v1.2.3 From 8bb68a235ffd0629a690cfe9a2dd913998f581f1 Mon Sep 17 00:00:00 2001 From: Thomas Kemmer Date: Fri, 1 Apr 2016 11:56:54 +0200 Subject: Prepare v1.1.6. --- CHANGES.rst | 17 +++++++++++++++++ README.rst | 2 +- cachetools/__init__.py | 2 +- 3 files changed, 19 insertions(+), 2 deletions(-) diff --git a/CHANGES.rst b/CHANGES.rst index dd8b751..e468c31 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -1,3 +1,20 @@ +v1.1.6 (2016-04-01) +------------------- + +- Reimplement ``LRUCache`` and ``TTLCache`` using + ``collections.OrderedDict``. Note that this will break pickle + compatibility with previous versions. + +- Fix ``TTLCache`` not calling ``__missing__()`` of derived classes. 
+ +- Handle ``ValueError`` in ``Cache.__missing__()`` for consistency + with caching decorators. + +- Improve how ``TTLCache`` handles expired items. + +- Use ``Counter.most_common()`` for ``LFUCache.popitem()``. + + v1.1.5 (2015-10-25) ------------------- diff --git a/README.rst b/README.rst index 227e8a3..a5e62af 100644 --- a/README.rst +++ b/README.rst @@ -72,7 +72,7 @@ Project Resources License ------------------------------------------------------------------------ -Copyright (c) 2014, 2015 Thomas Kemmer. +Copyright (c) 2014-2016 Thomas Kemmer. Licensed under the `MIT License`_. diff --git a/cachetools/__init__.py b/cachetools/__init__.py index 66d5735..0630970 100644 --- a/cachetools/__init__.py +++ b/cachetools/__init__.py @@ -18,7 +18,7 @@ __all__ = ( 'lfu_cache', 'lru_cache', 'rr_cache', 'ttl_cache', ) -__version__ = '1.1.5' +__version__ = '1.1.6' _default = [] # evaluates to False -- cgit v1.2.3 From 381467c0a6b3aa994d205b5e99cc7564dad09509 Mon Sep 17 00:00:00 2001 From: Thomas Kemmer Date: Mon, 4 Apr 2016 19:43:51 +0200 Subject: Fix #70: Update LICENSE copyright. --- LICENSE | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/LICENSE b/LICENSE index aa77426..73c1611 100644 --- a/LICENSE +++ b/LICENSE @@ -1,6 +1,6 @@ The MIT License (MIT) -Copyright (c) 2014, 2015 Thomas Kemmer +Copyright (c) 2014-2016 Thomas Kemmer Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in -- cgit v1.2.3 From 7f850b8c8fa60ccdeca3ffe865debf2f5f165388 Mon Sep 17 00:00:00 2001 From: Thomas Kemmer Date: Mon, 4 Apr 2016 19:52:29 +0200 Subject: Fix #71: No unicode_literals in setup.py. 
--- setup.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/setup.py b/setup.py index b065d87..11a1404 100644 --- a/setup.py +++ b/setup.py @@ -1,5 +1,3 @@ -from __future__ import unicode_literals - from setuptools import find_packages, setup -- cgit v1.2.3 From 5ab2ab1b41980f72f855872b463cac60b6f14e22 Mon Sep 17 00:00:00 2001 From: Thomas Kemmer Date: Tue, 27 Sep 2016 16:06:00 +0200 Subject: Remove PyPI downloads badge. --- README.rst | 4 ---- 1 file changed, 4 deletions(-) diff --git a/README.rst b/README.rst index a5e62af..4742da3 100644 --- a/README.rst +++ b/README.rst @@ -51,10 +51,6 @@ Project Resources :target: https://pypi.python.org/pypi/cachetools/ :alt: Latest PyPI version -.. image:: http://img.shields.io/pypi/dm/cachetools.svg?style=flat - :target: https://pypi.python.org/pypi/cachetools/ - :alt: Number of PyPI downloads - .. image:: http://img.shields.io/travis/tkem/cachetools/master.svg?style=flat :target: https://travis-ci.org/tkem/cachetools/ :alt: Travis CI build status -- cgit v1.2.3 From 7a7c836a200325320676fd7640c02e789764cd41 Mon Sep 17 00:00:00 2001 From: Thomas Kemmer Date: Tue, 27 Sep 2016 15:12:23 +0200 Subject: Fix #73: Accept non-integer maxsize in __repr__(). --- cachetools/cache.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cachetools/cache.py b/cachetools/cache.py index 409a6b4..15f526e 100644 --- a/cachetools/cache.py +++ b/cachetools/cache.py @@ -28,7 +28,7 @@ class Cache(DefaultMapping): self.__maxsize = maxsize def __repr__(self): - return '%s(%r, maxsize=%d, currsize=%d)' % ( + return '%s(%r, maxsize=%r, currsize=%r)' % ( self.__class__.__name__, list(self.__data.items()), self.__maxsize, -- cgit v1.2.3 From f9e6e46ab058c26fc48f6a27007cec17f601876a Mon Sep 17 00:00:00 2001 From: Thomas Kemmer Date: Tue, 27 Sep 2016 15:20:44 +0200 Subject: Fix #65: Drop Python 3.2 support. 
--- .travis.yml | 3 +-- setup.py | 1 - tox.ini | 3 +-- 3 files changed, 2 insertions(+), 5 deletions(-) diff --git a/.travis.yml b/.travis.yml index ac1856c..6a2852b 100644 --- a/.travis.yml +++ b/.travis.yml @@ -4,13 +4,12 @@ language: python python: - 2.7 -- 3.2 - 3.3 - 3.4 - 3.5 install: -- pip install "coverage<4" coveralls tox "virtualenv<14.0.0" +- pip install coveralls tox script: - tox -e check-manifest,flake8,py diff --git a/setup.py b/setup.py index 11a1404..5c0706f 100644 --- a/setup.py +++ b/setup.py @@ -28,7 +28,6 @@ setup( 'Programming Language :: Python :: 2', 'Programming Language :: Python :: 2.7', 'Programming Language :: Python :: 3', - 'Programming Language :: Python :: 3.2', 'Programming Language :: Python :: 3.3', 'Programming Language :: Python :: 3.4', 'Programming Language :: Python :: 3.5', diff --git a/tox.ini b/tox.ini index c98f2f6..0bfd943 100644 --- a/tox.ini +++ b/tox.ini @@ -2,9 +2,8 @@ envlist = check-manifest,docs,flake8,py [testenv] -# coverage 4.0 drops Python 3.2 compatibility deps = - coverage<4 + coverage pytest pytest-cov commands = -- cgit v1.2.3 From bb87402d379dceff5309acfc71a1b335fbca787e Mon Sep 17 00:00:00 2001 From: Thomas Kemmer Date: Mon, 3 Oct 2016 13:00:12 +0200 Subject: Fix #75: Remove deprecated features. 
--- cachetools/__init__.py | 33 +++------------ cachetools/func.py | 59 +++++++------------------- docs/index.rst | 111 +++++-------------------------------------------- tests/test_func.py | 47 --------------------- tests/test_method.py | 29 ------------- 5 files changed, 30 insertions(+), 249 deletions(-) diff --git a/cachetools/__init__.py b/cachetools/__init__.py index 0630970..ed9586b 100644 --- a/cachetools/__init__.py +++ b/cachetools/__init__.py @@ -1,10 +1,8 @@ """Extensible memoizing collections and decorators.""" import functools -import warnings from .cache import Cache -from .func import lfu_cache, lru_cache, rr_cache, ttl_cache from .keys import hashkey, typedkey from .lfu import LFUCache from .lru import LRUCache @@ -13,9 +11,7 @@ from .ttl import TTLCache __all__ = ( 'Cache', 'LFUCache', 'LRUCache', 'RRCache', 'TTLCache', - 'cached', 'cachedmethod', 'hashkey', 'typedkey', - # make cachetools.func.* available for backwards compatibility - 'lfu_cache', 'lru_cache', 'rr_cache', 'ttl_cache', + 'cached', 'cachedmethod', 'hashkey', 'typedkey' ) __version__ = '1.1.6' @@ -72,29 +68,18 @@ def cached(cache, key=hashkey, lock=None): return decorator -def cachedmethod(cache, key=_default, lock=None, typed=_default): +def cachedmethod(cache, key=hashkey, lock=None): """Decorator to wrap a class or instance method with a memoizing callable that saves results in a cache. 
""" - if key is not _default and not callable(key): - key, typed = _default, key - if typed is not _default: - warnings.warn("Passing 'typed' to cachedmethod() is deprecated, " - "use 'key=typedkey' instead", DeprecationWarning, 2) - def decorator(method): - # pass method to default key function for backwards compatibilty - if key is _default: - makekey = functools.partial(typedkey if typed else hashkey, method) - else: - makekey = key # custom key function always receive method args if lock is None: def wrapper(self, *args, **kwargs): c = cache(self) if c is None: return method(self, *args, **kwargs) - k = makekey(self, *args, **kwargs) + k = key(self, *args, **kwargs) try: return c[k] except KeyError: @@ -110,7 +95,7 @@ def cachedmethod(cache, key=_default, lock=None, typed=_default): c = cache(self) if c is None: return method(self, *args, **kwargs) - k = makekey(self, *args, **kwargs) + k = key(self, *args, **kwargs) try: with lock(self): return c[k] @@ -123,13 +108,5 @@ def cachedmethod(cache, key=_default, lock=None, typed=_default): except ValueError: pass # value too large return v - _update_wrapper(wrapper, method) - - # deprecated wrapper attribute - def getter(self): - warnings.warn('%s.cache is deprecated' % method.__name__, - DeprecationWarning, 2) - return cache(self) - wrapper.cache = getter - return wrapper + return _update_wrapper(wrapper, method) return decorator diff --git a/cachetools/func.py b/cachetools/func.py index 25b415a..ce32f59 100644 --- a/cachetools/func.py +++ b/cachetools/func.py @@ -4,7 +4,6 @@ import collections import functools import random import time -import warnings try: from threading import RLock @@ -12,37 +11,23 @@ except ImportError: from dummy_threading import RLock from .keys import hashkey, typedkey +from .lfu import LFUCache +from .lru import LRUCache +from .rr import RRCache +from .ttl import TTLCache __all__ = ('lfu_cache', 'lru_cache', 'rr_cache', 'ttl_cache') -class _NLock: - def __enter__(self): - pass - - def 
__exit__(self, *exc): - pass - _CacheInfo = collections.namedtuple('CacheInfo', [ 'hits', 'misses', 'maxsize', 'currsize' ]) -_marker = object() - - -def _deprecated(message, level=2): - warnings.warn('%s is deprecated' % message, DeprecationWarning, level) - -def _cache(cache, typed=False, context=_marker): +def _cache(cache, typed=False): def decorator(func): key = typedkey if typed else hashkey - if context is _marker: - lock = RLock() - elif context is None: - lock = _NLock() - else: - lock = context() + lock = RLock() stats = [0, 0] def cache_info(): @@ -77,57 +62,43 @@ def _cache(cache, typed=False, context=_marker): return v functools.update_wrapper(wrapper, func) if not hasattr(wrapper, '__wrapped__'): - wrapper.__wrapped__ = func # Python < 3.2 + wrapper.__wrapped__ = func # Python 2.7 wrapper.cache_info = cache_info wrapper.cache_clear = cache_clear return wrapper return decorator -def lfu_cache(maxsize=128, typed=False, getsizeof=None, lock=_marker): +def lfu_cache(maxsize=128, typed=False): """Decorator to wrap a function with a memoizing callable that saves up to `maxsize` results based on a Least Frequently Used (LFU) algorithm. """ - from .lfu import LFUCache - if lock is not _marker: - _deprecated("Passing 'lock' to lfu_cache()", 3) - return _cache(LFUCache(maxsize, getsizeof), typed, lock) + return _cache(LFUCache(maxsize), typed) -def lru_cache(maxsize=128, typed=False, getsizeof=None, lock=_marker): +def lru_cache(maxsize=128, typed=False): """Decorator to wrap a function with a memoizing callable that saves up to `maxsize` results based on a Least Recently Used (LRU) algorithm. 
""" - from .lru import LRUCache - if lock is not _marker: - _deprecated("Passing 'lock' to lru_cache()", 3) - return _cache(LRUCache(maxsize, getsizeof), typed, lock) + return _cache(LRUCache(maxsize), typed) -def rr_cache(maxsize=128, choice=random.choice, typed=False, getsizeof=None, - lock=_marker): +def rr_cache(maxsize=128, choice=random.choice, typed=False): """Decorator to wrap a function with a memoizing callable that saves up to `maxsize` results based on a Random Replacement (RR) algorithm. """ - from .rr import RRCache - if lock is not _marker: - _deprecated("Passing 'lock' to rr_cache()", 3) - return _cache(RRCache(maxsize, choice, getsizeof), typed, lock) + return _cache(RRCache(maxsize, choice), typed) -def ttl_cache(maxsize=128, ttl=600, timer=time.time, typed=False, - getsizeof=None, lock=_marker): +def ttl_cache(maxsize=128, ttl=600, timer=time.time, typed=False): """Decorator to wrap a function with a memoizing callable that saves up to `maxsize` results based on a Least Recently Used (LRU) algorithm with a per-item time-to-live (TTL) value. """ - from .ttl import TTLCache - if lock is not _marker: - _deprecated("Passing 'lock' to ttl_cache()", 3) - return _cache(TTLCache(maxsize, ttl, timer, getsizeof), typed, lock) + return _cache(TTLCache(maxsize, ttl, timer), typed) diff --git a/docs/index.rst b/docs/index.rst index b287c46..bc850e0 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -21,20 +21,6 @@ Multiple cache classes based on different caching algorithms are implemented, and decorators for easily memoizing function and method calls are provided, too. -.. note:: - - Several features are now marked as deprecated and will be removed - in the next major release, :mod:`cachetools` version 2.0. If you - happen to rely on any of these features, it is highly recommended - to specify your module dependencies accordingly, for example - ``cachetools ~= 1.1`` when using :mod:`setuptools`. - -.. 
versionchanged:: 1.1 - - Moved :func:`functools.lru_cache` compatible decorators to the - :mod:`cachetools.func` module. For backwards compatibility, they - continue to be visible in this module as well. - Cache implementations ------------------------------------------------------------------------ @@ -243,7 +229,7 @@ often called with the same arguments:: .. versionadded:: 1.1 -.. decorator:: cachedmethod(cache, key=hashkey, lock=None, typed=False) +.. decorator:: cachedmethod(cache, key=hashkey, lock=None) Decorator to wrap a class or instance method with a memoizing callable that saves results in a (possibly shared) cache. @@ -261,11 +247,6 @@ often called with the same arguments:: is the user's responsibility to handle concurrent calls to the underlying wrapped method in a multithreaded environment. - If `key` or the optional `typed` keyword argument are set to - :const:`True`, the :func:`typedkey` function is used for generating - hash keys. This has been deprecated in favor of specifying - ``key=typedkey`` explicitly. - One advantage of :func:`cachedmethod` over the :func:`cached` function decorator is that cache properties such as `maxsize` can be set at runtime:: @@ -290,37 +271,6 @@ often called with the same arguments:: peps = CachedPEPs(cachesize=10) print("PEP #1: %s" % peps.get(1)) - For backwards compatibility, the default key function used by - :func:`cachedmethod` will generate distinct keys for different - methods to ease using a shared cache with multiple methods. This - has been deprecated, and relying on this feature is strongly - discouraged. When using a shared cache, distinct key functions - should be used, as with the :func:`cached` decorator. - - .. versionadded:: 1.1 - - The optional `key` and `lock` parameters. - - .. versionchanged:: 1.1 - - The :attr:`__wrapped__` attribute is now set when running Python - 2.7, too. - - .. deprecated:: 1.1 - - The `typed` argument. Use ``key=typedkey`` instead. - - .. 
deprecated:: 1.1 - - When using a shared cached for multiple methods, distinct key - function should be used. - - .. deprecated:: 1.1 - - The wrapper function's :attr:`cache` attribute. Use the - original function passed as the decorator's `cache` argument to - access the cache object. - Key functions ------------------------------------------------------------------------ @@ -333,8 +283,6 @@ The following functions can be used as key functions with the This function returns a :class:`tuple` instance suitable as a cache key, provided the positional and keywords arguments are hashable. - .. versionadded:: 1.1 - .. autofunction:: typedkey This function is similar to :func:`hashkey`, but arguments of @@ -342,8 +290,6 @@ The following functions can be used as key functions with the ``typedkey(3)`` and ``typedkey(3.0)`` will return different results. - .. versionadded:: 1.1 - These functions can also be helpful when implementing custom key functions for handling some non-hashable arguments. For example, calling the following function with a dictionary as its `env` argument @@ -381,77 +327,40 @@ different caching strategies. Note that unlike :func:`functools.lru_cache`, setting `maxsize` to :const:`None` is not supported. +If the optional argument `typed` is set to :const:`True`, function +arguments of different types will be cached separately. For example, +``f(3)`` and ``f(3.0)`` will be treated as distinct calls with +distinct results. + The wrapped function is instrumented with :func:`cache_info` and :func:`cache_clear` functions to provide information about cache performance and clear the cache. See the :func:`functools.lru_cache` documentation for details. -In addition to `maxsize`, all decorators accept the following -optional keyword arguments: - -- `typed`, if is set to :const:`True`, will cause function arguments - of different types to be cached separately. For example, ``f(3)`` - and ``f(3.0)`` will be treated as distinct calls with distinct - results. 
- -- `getsizeof` specifies a function of one argument that will be - applied to each cache value to determine its size. The default - value is :const:`None`, which will assign each item an equal size of - :const:`1`. This has been deprecated in favor of the new - :func:`cachetools.cached` decorator, which allows passing fully - customized cache objects. - -- `lock` specifies a function of zero arguments that returns a - `context manager`_ to lock the cache when necessary. If not - specified, :class:`threading.RLock` will be used to synchronize - access from multiple threads. The use of `lock` is discouraged, and - the `lock` argument has been deprecated. - -.. versionadded:: 1.1 - - Formerly, the decorators provided by :mod:`cachetools.func` were - part of the :mod:`cachetools` module. - -.. decorator:: lfu_cache(maxsize=128, typed=False, getsizeof=None, lock=threading.RLock) +.. decorator:: lfu_cache(maxsize=128, typed=False) Decorator that wraps a function with a memoizing callable that saves up to `maxsize` results based on a Least Frequently Used (LFU) algorithm. - .. deprecated:: 1.1 - - The `getsizeof` and `lock` arguments. - -.. decorator:: lru_cache(maxsize=128, typed=False, getsizeof=None, lock=threading.RLock) +.. decorator:: lru_cache(maxsize=128, typed=False) Decorator that wraps a function with a memoizing callable that saves up to `maxsize` results based on a Least Recently Used (LRU) algorithm. - .. deprecated:: 1.1 - - The `getsizeof` and `lock` arguments. - -.. decorator:: rr_cache(maxsize=128, choice=random.choice, typed=False, getsizeof=None, lock=threading.RLock) +.. decorator:: rr_cache(maxsize=128, choice=random.choice, typed=False) Decorator that wraps a function with a memoizing callable that saves up to `maxsize` results based on a Random Replacement (RR) algorithm. - .. deprecated:: 1.1 - - The `getsizeof` and `lock` arguments. - -.. 
decorator:: ttl_cache(maxsize=128, ttl=600, timer=time.time, typed=False, getsizeof=None, lock=threading.RLock) +.. decorator:: ttl_cache(maxsize=128, ttl=600, timer=time.time, typed=False) Decorator to wrap a function with a memoizing callable that saves up to `maxsize` results based on a Least Recently Used (LRU) algorithm with a per-item time-to-live (TTL) value. - .. deprecated:: 1.1 - - The `getsizeof` and `lock` arguments. - .. _@lru_cache: http://docs.python.org/3/library/functools.html#functools.lru_cache .. _cache algorithm: http://en.wikipedia.org/wiki/Cache_algorithms diff --git a/tests/test_func.py b/tests/test_func.py index 236a5d7..1f33246 100644 --- a/tests/test_func.py +++ b/tests/test_func.py @@ -1,5 +1,4 @@ import unittest -import warnings import cachetools.func @@ -55,52 +54,6 @@ class DecoratorTestMixin(object): self.assertEqual(cached(1.0), 1.0) self.assertEqual(cached.cache_info(), (2, 2, 2, 2)) - def test_decorator_lock(self): - class Lock(object): - count = 0 - - def __enter__(self): - Lock.count += 1 - - def __exit__(self, *exc): - pass - - with warnings.catch_warnings(record=True) as w: - warnings.simplefilter('always') - cached = self.decorator(maxsize=2, lock=Lock)(lambda n: n) - self.assertEqual(len(w), 1) - self.assertIs(w[0].category, DeprecationWarning) - - self.assertEqual(cached.cache_info(), (0, 0, 2, 0)) - self.assertEqual(Lock.count, 1) - self.assertEqual(cached(1), 1) - self.assertEqual(Lock.count, 3) - self.assertEqual(cached.cache_info(), (0, 1, 2, 1)) - self.assertEqual(Lock.count, 4) - self.assertEqual(cached(1), 1) - self.assertEqual(Lock.count, 5) - self.assertEqual(cached.cache_info(), (1, 1, 2, 1)) - self.assertEqual(Lock.count, 6) - self.assertEqual(cached(1.0), 1.0) - self.assertEqual(Lock.count, 7) - self.assertEqual(cached.cache_info(), (2, 1, 2, 1)) - self.assertEqual(Lock.count, 8) - - def test_decorator_nolock(self): - with warnings.catch_warnings(record=True) as w: - warnings.simplefilter('always') - cached = 
self.decorator(maxsize=2, lock=None)(lambda n: n) - self.assertEqual(len(w), 1) - self.assertIs(w[0].category, DeprecationWarning) - - self.assertEqual(cached.cache_info(), (0, 0, 2, 0)) - self.assertEqual(cached(1), 1) - self.assertEqual(cached.cache_info(), (0, 1, 2, 1)) - self.assertEqual(cached(1), 1) - self.assertEqual(cached.cache_info(), (1, 1, 2, 1)) - self.assertEqual(cached(1.0), 1.0) - self.assertEqual(cached.cache_info(), (2, 1, 2, 1)) - class LFUDecoratorTest(unittest.TestCase, DecoratorTestMixin): diff --git a/tests/test_method.py b/tests/test_method.py index b0b9916..cf53536 100644 --- a/tests/test_method.py +++ b/tests/test_method.py @@ -1,6 +1,5 @@ import operator import unittest -import warnings from cachetools import LRUCache, cachedmethod, typedkey @@ -45,7 +44,6 @@ class CachedMethodTest(unittest.TestCase): def test_dict(self): cached = Cached({}) - self.assertEqual(cached.cache, cached.get.cache(cached)) self.assertEqual(cached.get(0), 0) self.assertEqual(cached.get(1), 1) @@ -58,7 +56,6 @@ class CachedMethodTest(unittest.TestCase): def test_typed_dict(self): cached = Cached(LRUCache(maxsize=2)) - self.assertEqual(cached.cache, cached.get_typed.cache(cached)) self.assertEqual(cached.get_typed(0), 0) self.assertEqual(cached.get_typed(1), 1) @@ -70,7 +67,6 @@ class CachedMethodTest(unittest.TestCase): def test_lru(self): cached = Cached(LRUCache(maxsize=2)) - self.assertEqual(cached.cache, cached.get.cache(cached)) self.assertEqual(cached.get(0), 0) self.assertEqual(cached.get(1), 1) @@ -83,7 +79,6 @@ class CachedMethodTest(unittest.TestCase): def test_typed_lru(self): cached = Cached(LRUCache(maxsize=2)) - self.assertEqual(cached.cache, cached.get_typed.cache(cached)) self.assertEqual(cached.get_typed(0), 0) self.assertEqual(cached.get_typed(1), 1) @@ -95,7 +90,6 @@ class CachedMethodTest(unittest.TestCase): def test_nospace(self): cached = Cached(LRUCache(maxsize=0)) - self.assertEqual(cached.cache, cached.get.cache(cached)) 
self.assertEqual(cached.get(0), 0) self.assertEqual(cached.get(1), 1) @@ -105,7 +99,6 @@ class CachedMethodTest(unittest.TestCase): def test_nocache(self): cached = Cached(None) - self.assertEqual(None, cached.get.cache(cached)) self.assertEqual(cached.get(0), 0) self.assertEqual(cached.get(1), 1) @@ -124,7 +117,6 @@ class CachedMethodTest(unittest.TestCase): return Int(fractions.Fraction.__add__(self, other)) cached = Cached(weakref.WeakValueDictionary(), count=Int(0)) - self.assertEqual(cached.cache, cached.get.cache(cached)) self.assertEqual(cached.get(0), 0) self.assertEqual(cached.get(0), 1) @@ -144,7 +136,6 @@ class CachedMethodTest(unittest.TestCase): def test_locked_dict(self): cached = Locked({}) - self.assertEqual(cached.cache, cached.get.cache(cached)) self.assertEqual(cached.get(0), 1) self.assertEqual(cached.get(1), 3) @@ -154,7 +145,6 @@ class CachedMethodTest(unittest.TestCase): def test_locked_nocache(self): cached = Locked(None) - self.assertEqual(None, cached.get.cache(cached)) self.assertEqual(cached.get(0), 0) self.assertEqual(cached.get(1), 0) @@ -164,28 +154,9 @@ class CachedMethodTest(unittest.TestCase): def test_locked_nospace(self): cached = Locked(LRUCache(maxsize=0)) - self.assertEqual(cached.cache, cached.get.cache(cached)) self.assertEqual(cached.get(0), 1) self.assertEqual(cached.get(1), 3) self.assertEqual(cached.get(1), 5) self.assertEqual(cached.get(1.0), 7) self.assertEqual(cached.get(1.0), 9) - - def test_typed_deprecated(self): - with warnings.catch_warnings(record=True) as w: - warnings.simplefilter("always") - cachedmethod(lambda self: None, None)(lambda self: None) - self.assertIs(w[-1].category, DeprecationWarning) - with warnings.catch_warnings(record=True) as w: - warnings.simplefilter("always") - cachedmethod(lambda self: None, False)(lambda self: None) - self.assertIs(w[-1].category, DeprecationWarning) - with warnings.catch_warnings(record=True) as w: - warnings.simplefilter("always") - cachedmethod(lambda self: None, 
True)(lambda self: None) - self.assertIs(w[-1].category, DeprecationWarning) - with warnings.catch_warnings(record=True) as w: - warnings.simplefilter("always") - cachedmethod(lambda self: None, typed=None)(lambda self: None) - self.assertIs(w[-1].category, DeprecationWarning) -- cgit v1.2.3 From 1770a71f3e441edd6c636412d2309cbcf9ec931b Mon Sep 17 00:00:00 2001 From: Thomas Kemmer Date: Mon, 3 Oct 2016 13:43:18 +0200 Subject: Fix #74: Move key functions to separate package. --- cachetools/__init__.py | 10 ++++++---- cachetools/cache.py | 2 ++ cachetools/func.py | 6 ++++-- cachetools/keys.py | 4 ++++ cachetools/lfu.py | 2 ++ cachetools/lru.py | 2 ++ cachetools/rr.py | 2 ++ cachetools/ttl.py | 2 ++ docs/index.rst | 20 ++++++++++---------- tests/test_keys.py | 8 ++++---- tests/test_method.py | 4 ++-- tests/test_wrapper.py | 40 ++++++++++++++++++---------------------- 12 files changed, 58 insertions(+), 44 deletions(-) diff --git a/cachetools/__init__.py b/cachetools/__init__.py index ed9586b..4144e48 100644 --- a/cachetools/__init__.py +++ b/cachetools/__init__.py @@ -1,9 +1,11 @@ """Extensible memoizing collections and decorators.""" +from __future__ import absolute_import + import functools +from . import keys from .cache import Cache -from .keys import hashkey, typedkey from .lfu import LFUCache from .lru import LRUCache from .rr import RRCache @@ -11,7 +13,7 @@ from .ttl import TTLCache __all__ = ( 'Cache', 'LFUCache', 'LRUCache', 'RRCache', 'TTLCache', - 'cached', 'cachedmethod', 'hashkey', 'typedkey' + 'cached', 'cachedmethod' ) __version__ = '1.1.6' @@ -27,7 +29,7 @@ else: return wrapper -def cached(cache, key=hashkey, lock=None): +def cached(cache, key=keys.hashkey, lock=None): """Decorator to wrap a function with a memoizing callable that saves results in a cache. 
@@ -68,7 +70,7 @@ def cached(cache, key=hashkey, lock=None): return decorator -def cachedmethod(cache, key=hashkey, lock=None): +def cachedmethod(cache, key=keys.hashkey, lock=None): """Decorator to wrap a class or instance method with a memoizing callable that saves results in a cache. diff --git a/cachetools/cache.py b/cachetools/cache.py index 15f526e..0852631 100644 --- a/cachetools/cache.py +++ b/cachetools/cache.py @@ -1,3 +1,5 @@ +from __future__ import absolute_import + from .abc import DefaultMapping diff --git a/cachetools/func.py b/cachetools/func.py index ce32f59..5a2ce84 100644 --- a/cachetools/func.py +++ b/cachetools/func.py @@ -1,5 +1,7 @@ """`functools.lru_cache` compatible memoizing function decorators.""" +from __future__ import absolute_import + import collections import functools import random @@ -10,7 +12,7 @@ try: except ImportError: from dummy_threading import RLock -from .keys import hashkey, typedkey +from . import keys from .lfu import LFUCache from .lru import LRUCache from .rr import RRCache @@ -26,7 +28,7 @@ _CacheInfo = collections.namedtuple('CacheInfo', [ def _cache(cache, typed=False): def decorator(func): - key = typedkey if typed else hashkey + key = keys.typedkey if typed else keys.hashkey lock = RLock() stats = [0, 0] diff --git a/cachetools/keys.py b/cachetools/keys.py index 887fb30..ba1e2fc 100644 --- a/cachetools/keys.py +++ b/cachetools/keys.py @@ -1,3 +1,7 @@ +"""Key functions for memoizing decorators.""" + +from __future__ import absolute_import + __all__ = ('hashkey', 'typedkey') diff --git a/cachetools/lfu.py b/cachetools/lfu.py index 160f537..f5817a4 100644 --- a/cachetools/lfu.py +++ b/cachetools/lfu.py @@ -1,3 +1,5 @@ +from __future__ import absolute_import + import collections from .cache import Cache diff --git a/cachetools/lru.py b/cachetools/lru.py index 525abd8..b945797 100644 --- a/cachetools/lru.py +++ b/cachetools/lru.py @@ -1,3 +1,5 @@ +from __future__ import absolute_import + import collections from .cache 
import Cache diff --git a/cachetools/rr.py b/cachetools/rr.py index c82919e..8cd856c 100644 --- a/cachetools/rr.py +++ b/cachetools/rr.py @@ -1,3 +1,5 @@ +from __future__ import absolute_import + import random from .cache import Cache diff --git a/cachetools/ttl.py b/cachetools/ttl.py index 04e9d85..d20cc0b 100644 --- a/cachetools/ttl.py +++ b/cachetools/ttl.py @@ -1,3 +1,5 @@ +from __future__ import absolute_import + import collections import time diff --git a/docs/index.rst b/docs/index.rst index bc850e0..23c4dae 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -135,7 +135,7 @@ of one argument used to retrieve the size of an item's value. expired by the current value returned by :attr:`timer`. -Decorators +Memoizing decorators ------------------------------------------------------------------------ The :mod:`cachetools` module provides decorators for memoizing @@ -151,7 +151,7 @@ often called with the same arguments:: for i in range(100): print('fib(%d) = %d' % (i, fib(i))) -.. decorator:: cached(cache, key=hashkey, lock=None) +.. decorator:: cached(cache, key=cachetools.keys.hashkey, lock=None) Decorator to wrap a function with a memoizing callable that saves results in a cache. @@ -167,7 +167,7 @@ often called with the same arguments:: positional and keyword arguments as the wrapped function itself, and which has to return a suitable cache key. Since caches are mappings, the object returned by `key` must be hashable. The - default is to call :func:`hashkey`. + default is to call :func:`cachetools.keys.hashkey`. If `lock` is not :const:`None`, it must specify an object implementing the `context manager`_ protocol. Any access to the @@ -227,9 +227,7 @@ often called with the same arguments:: print(fac(42)) print(cache) - .. versionadded:: 1.1 - -.. decorator:: cachedmethod(cache, key=hashkey, lock=None) +.. 
decorator:: cachedmethod(cache, key=cachetools.keys.hashkey, lock=None) Decorator to wrap a class or instance method with a memoizing callable that saves results in a (possibly shared) cache. @@ -272,11 +270,13 @@ often called with the same arguments:: print("PEP #1: %s" % peps.get(1)) -Key functions ------------------------------------------------------------------------- +:mod:`cachetools.keys` --- Key functions for memoizing decorators +============================================================================ + +.. module:: cachetools.keys -The following functions can be used as key functions with the -:func:`cached` and :func:`cachedmethod` decorators: +This module provides several functions that can be used as key +functions with the :func:`cached` and :func:`cachedmethod` decorators: .. autofunction:: hashkey diff --git a/tests/test_keys.py b/tests/test_keys.py index 94184a5..2b9ced6 100644 --- a/tests/test_keys.py +++ b/tests/test_keys.py @@ -1,11 +1,11 @@ import unittest -import cachetools +import cachetools.keys class CacheKeysTest(unittest.TestCase): - def test_hashkey(self, key=cachetools.hashkey): + def test_hashkey(self, key=cachetools.keys.hashkey): self.assertEqual(key(), key()) self.assertEqual(hash(key()), hash(key())) self.assertEqual(key(1, 2, 3), key(1, 2, 3)) @@ -22,7 +22,7 @@ class CacheKeysTest(unittest.TestCase): self.assertEqual(key(1, 2, 3), key(1.0, 2.0, 3.0)) self.assertEqual(hash(key(1, 2, 3)), hash(key(1.0, 2.0, 3.0))) - def test_typedkey(self, key=cachetools.typedkey): + def test_typedkey(self, key=cachetools.keys.typedkey): self.assertEqual(key(), key()) self.assertEqual(hash(key()), hash(key())) self.assertEqual(key(1, 2, 3), key(1, 2, 3)) @@ -38,7 +38,7 @@ class CacheKeysTest(unittest.TestCase): # typed keys compare unequal self.assertNotEqual(key(1, 2, 3), key(1.0, 2.0, 3.0)) - def test_addkeys(self, key=cachetools.hashkey): + def test_addkeys(self, key=cachetools.keys.hashkey): self.assertIsInstance(key(), tuple) 
self.assertIsInstance(key(1, 2, 3) + key(4, 5, 6), type(key())) self.assertIsInstance(key(1, 2, 3) + (4, 5, 6), type(key())) diff --git a/tests/test_method.py b/tests/test_method.py index cf53536..db810b6 100644 --- a/tests/test_method.py +++ b/tests/test_method.py @@ -1,7 +1,7 @@ import operator import unittest -from cachetools import LRUCache, cachedmethod, typedkey +from cachetools import LRUCache, cachedmethod, keys class Cached(object): @@ -16,7 +16,7 @@ class Cached(object): self.count += 1 return count - @cachedmethod(operator.attrgetter('cache'), key=typedkey) + @cachedmethod(operator.attrgetter('cache'), key=keys.typedkey) def get_typed(self, value): count = self.count self.count += 1 diff --git a/tests/test_wrapper.py b/tests/test_wrapper.py index 1d03fb2..a6e649c 100644 --- a/tests/test_wrapper.py +++ b/tests/test_wrapper.py @@ -1,6 +1,7 @@ import unittest import cachetools +import cachetools.keys class DecoratorTestMixin(object): @@ -24,15 +25,15 @@ class DecoratorTestMixin(object): self.assertEqual(wrapper(0), 0) self.assertEqual(len(cache), 1) - self.assertIn(cachetools.hashkey(0), cache) - self.assertNotIn(cachetools.hashkey(1), cache) - self.assertNotIn(cachetools.hashkey(1.0), cache) + self.assertIn(cachetools.keys.hashkey(0), cache) + self.assertNotIn(cachetools.keys.hashkey(1), cache) + self.assertNotIn(cachetools.keys.hashkey(1.0), cache) self.assertEqual(wrapper(1), 1) self.assertEqual(len(cache), 2) - self.assertIn(cachetools.hashkey(0), cache) - self.assertIn(cachetools.hashkey(1), cache) - self.assertIn(cachetools.hashkey(1.0), cache) + self.assertIn(cachetools.keys.hashkey(0), cache) + self.assertIn(cachetools.keys.hashkey(1), cache) + self.assertIn(cachetools.keys.hashkey(1.0), cache) self.assertEqual(wrapper(1), 1) self.assertEqual(len(cache), 2) @@ -45,37 +46,32 @@ class DecoratorTestMixin(object): def test_decorator_typed(self): cache = self.cache(3) - - def typedkey(*args, **kwargs): - key = cachetools.hashkey(*args, **kwargs) - key += 
tuple(type(v) for v in args) - key += tuple(type(v) for _, v in sorted(kwargs.items())) - return key - wrapper = cachetools.cached(cache, key=typedkey)(self.func) + key = cachetools.keys.typedkey + wrapper = cachetools.cached(cache, key=key)(self.func) self.assertEqual(len(cache), 0) self.assertEqual(wrapper.__wrapped__, self.func) self.assertEqual(wrapper(0), 0) self.assertEqual(len(cache), 1) - self.assertIn(typedkey(0), cache) - self.assertNotIn(typedkey(1), cache) - self.assertNotIn(typedkey(1.0), cache) + self.assertIn(cachetools.keys.typedkey(0), cache) + self.assertNotIn(cachetools.keys.typedkey(1), cache) + self.assertNotIn(cachetools.keys.typedkey(1.0), cache) self.assertEqual(wrapper(1), 1) self.assertEqual(len(cache), 2) - self.assertIn(typedkey(0), cache) - self.assertIn(typedkey(1), cache) - self.assertNotIn(typedkey(1.0), cache) + self.assertIn(cachetools.keys.typedkey(0), cache) + self.assertIn(cachetools.keys.typedkey(1), cache) + self.assertNotIn(cachetools.keys.typedkey(1.0), cache) self.assertEqual(wrapper(1), 1) self.assertEqual(len(cache), 2) self.assertEqual(wrapper(1.0), 2) self.assertEqual(len(cache), 3) - self.assertIn(typedkey(0), cache) - self.assertIn(typedkey(1), cache) - self.assertIn(typedkey(1.0), cache) + self.assertIn(cachetools.keys.typedkey(0), cache) + self.assertIn(cachetools.keys.typedkey(1), cache) + self.assertIn(cachetools.keys.typedkey(1.0), cache) self.assertEqual(wrapper(1.0), 2) self.assertEqual(len(cache), 3) -- cgit v1.2.3 From e27332bc82f4e327aedaec17c9b656ae719322ed Mon Sep 17 00:00:00 2001 From: Thomas Kemmer Date: Mon, 3 Oct 2016 14:00:45 +0200 Subject: Prepare v2.0.0. --- CHANGES.rst | 12 ++++++++++++ cachetools/__init__.py | 4 +--- 2 files changed, 13 insertions(+), 3 deletions(-) diff --git a/CHANGES.rst b/CHANGES.rst index e468c31..6aeabd3 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -1,3 +1,15 @@ +v2.0.0 (2016-10-03) +------------------- + +- Drop Python 3.2 support (breaking change). 
+ +- Drop support for deprecated features (breaking change). + +- Move key functions to separate package (breaking change). + +- Accept non-integer ``maxsize`` in ``Cache.__repr__()``. + + v1.1.6 (2016-04-01) ------------------- diff --git a/cachetools/__init__.py b/cachetools/__init__.py index 4144e48..6469970 100644 --- a/cachetools/__init__.py +++ b/cachetools/__init__.py @@ -16,9 +16,7 @@ __all__ = ( 'cached', 'cachedmethod' ) -__version__ = '1.1.6' - -_default = [] # evaluates to False +__version__ = '2.0.0' if hasattr(functools.update_wrapper(lambda f: f(), lambda: 42), '__wrapped__'): _update_wrapper = functools.update_wrapper -- cgit v1.2.3 From dd597feadd2906d2c1af2eb4bedda1f6dcbfd370 Mon Sep 17 00:00:00 2001 From: Thomas Kemmer Date: Fri, 11 Aug 2017 18:00:59 +0200 Subject: Fix #85: Update import paths for key functions (courtesy of slavkoja). --- docs/index.rst | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/docs/index.rst b/docs/index.rst index 23c4dae..fe05736 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -211,7 +211,8 @@ often called with the same arguments:: function arguments:: from functools import partial - from cachetools import cached, hashkey, LRUCache + from cachetools import cached, LRUCache + from cachetools.keys import hashkey cache = LRUCache(maxsize=100) -- cgit v1.2.3 From d57c8a3ea7108ee05381d3a0d58329a1c98707a6 Mon Sep 17 00:00:00 2001 From: Thomas Kemmer Date: Fri, 11 Aug 2017 18:09:00 +0200 Subject: Update copyright statement, keep flake8 happy. 
--- LICENSE | 2 +- README.rst | 2 +- cachetools/abc.py | 1 + cachetools/keys.py | 1 + docs/conf.py | 3 ++- setup.py | 1 + 6 files changed, 7 insertions(+), 3 deletions(-) diff --git a/LICENSE b/LICENSE index 73c1611..a424aa2 100644 --- a/LICENSE +++ b/LICENSE @@ -1,6 +1,6 @@ The MIT License (MIT) -Copyright (c) 2014-2016 Thomas Kemmer +Copyright (c) 2014-2017 Thomas Kemmer Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in diff --git a/README.rst b/README.rst index 4742da3..c6cbf02 100644 --- a/README.rst +++ b/README.rst @@ -68,7 +68,7 @@ Project Resources License ------------------------------------------------------------------------ -Copyright (c) 2014-2016 Thomas Kemmer. +Copyright (c) 2014-2017 Thomas Kemmer. Licensed under the `MIT License`_. diff --git a/cachetools/abc.py b/cachetools/abc.py index 41ad736..d265f63 100644 --- a/cachetools/abc.py +++ b/cachetools/abc.py @@ -45,4 +45,5 @@ class DefaultMapping(collections.MutableMapping): self[key] = value = default return value + DefaultMapping.register(dict) diff --git a/cachetools/keys.py b/cachetools/keys.py index ba1e2fc..adb9dad 100644 --- a/cachetools/keys.py +++ b/cachetools/keys.py @@ -21,6 +21,7 @@ class _HashedTuple(tuple): def __radd__(self, other, add=tuple.__add__): return _HashedTuple(add(other, self)) + _kwmark = (object(),) diff --git a/docs/conf.py b/docs/conf.py index d51f10b..1b04038 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -4,8 +4,9 @@ def get_version(filename): metadata = dict(findall(r"__([a-z]+)__ = '([^']+)'", f.read())) return metadata['version'] + project = 'cachetools' -copyright = '2014-2016 Thomas Kemmer' +copyright = '2014-2017 Thomas Kemmer' version = get_version(b'../cachetools/__init__.py') release = version diff --git a/setup.py b/setup.py index 5c0706f..8e11256 100644 --- a/setup.py +++ b/setup.py @@ -7,6 +7,7 @@ def get_version(filename): metadata = 
dict(findall("__([a-z]+)__ = '([^']+)'", f.read())) return metadata['version'] + setup( name='cachetools', version=get_version('cachetools/__init__.py'), -- cgit v1.2.3 From d5b299c2564ed589f60e0f336fe653f3f03bc69d Mon Sep 17 00:00:00 2001 From: Thomas Kemmer Date: Fri, 11 Aug 2017 18:26:50 +0200 Subject: Fix #76: Move documentation to RTD. --- README.rst | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/README.rst b/README.rst index c6cbf02..850c41e 100644 --- a/README.rst +++ b/README.rst @@ -35,6 +35,9 @@ Multiple cache classes based on different caching algorithms are implemented, and decorators for easily memoizing function and method calls are provided, too. +For more information, please refer to cachetools's online +documentation_. + Installation ------------------------------------------------------------------------ @@ -59,7 +62,10 @@ Project Resources :target: https://coveralls.io/r/tkem/cachetools :alt: Test coverage -- `Documentation`_ +.. image:: https://readthedocs.org/projects/cachetools/badge/?version=latest&style=flat + :target: http://cachetools.readthedocs.io/en/latest/ + :alt: Documentation Status + - `Issue Tracker`_ - `Source Code`_ - `Change Log`_ @@ -78,7 +84,7 @@ Licensed under the `MIT License`_. .. _mapping: http://docs.python.org/dev/glossary.html#term-mapping .. _cache algorithm: http://en.wikipedia.org/wiki/Cache_algorithms -.. _Documentation: http://pythonhosted.org/cachetools/ +.. _Documentation: http://cachetools.readthedocs.io/en/latest/ .. _Issue Tracker: https://github.com/tkem/cachetools/issues/ .. _Source Code: https://github.com/tkem/cachetools/ .. _Change Log: https://github.com/tkem/cachetools/blob/master/CHANGES.rst -- cgit v1.2.3 From 13ba795ad8bcbbf6c7c838de39d189595cf3075d Mon Sep 17 00:00:00 2001 From: Thomas Kemmer Date: Fri, 11 Aug 2017 18:35:58 +0200 Subject: Improve README wording. 
--- README.rst | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/README.rst b/README.rst index 850c41e..b53399c 100644 --- a/README.rst +++ b/README.rst @@ -35,8 +35,7 @@ Multiple cache classes based on different caching algorithms are implemented, and decorators for easily memoizing function and method calls are provided, too. -For more information, please refer to cachetools's online -documentation_. +For more information, please refer to the online documentation_. Installation -- cgit v1.2.3 From 6bffc176dc6b127f1ee712b5b68bd8d7ac9a927e Mon Sep 17 00:00:00 2001 From: Thomas Kemmer Date: Fri, 11 Aug 2017 18:39:02 +0200 Subject: Fix #86: Officially support Python 3.6. --- .travis.yml | 1 + setup.py | 1 + 2 files changed, 2 insertions(+) diff --git a/.travis.yml b/.travis.yml index 6a2852b..d24ada3 100644 --- a/.travis.yml +++ b/.travis.yml @@ -7,6 +7,7 @@ python: - 3.3 - 3.4 - 3.5 +- 3.6 install: - pip install coveralls tox diff --git a/setup.py b/setup.py index 8e11256..57fa425 100644 --- a/setup.py +++ b/setup.py @@ -32,6 +32,7 @@ setup( 'Programming Language :: Python :: 3.3', 'Programming Language :: Python :: 3.4', 'Programming Language :: Python :: 3.5', + 'Programming Language :: Python :: 3.6', 'Topic :: Software Development :: Libraries :: Python Modules' ] ) -- cgit v1.2.3 From fba5f8a5e8bb48c9ae4eff81d10fe665ff66b066 Mon Sep 17 00:00:00 2001 From: Thomas Kemmer Date: Fri, 11 Aug 2017 18:51:13 +0200 Subject: Prepare v2.0.1. --- CHANGES.rst | 11 +++++++++++ cachetools/__init__.py | 2 +- 2 files changed, 12 insertions(+), 1 deletion(-) diff --git a/CHANGES.rst b/CHANGES.rst index 6aeabd3..bf6b13c 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -1,3 +1,14 @@ +v2.0.1 (2017-08-11) +------------------- + +- Officially support Python 3.6. + +- Move documentation to RTD. + +- Documentation: Update import paths for key functions (courtesy of + slavkoja). 
+ + v2.0.0 (2016-10-03) ------------------- diff --git a/cachetools/__init__.py b/cachetools/__init__.py index 6469970..54baa46 100644 --- a/cachetools/__init__.py +++ b/cachetools/__init__.py @@ -16,7 +16,7 @@ __all__ = ( 'cached', 'cachedmethod' ) -__version__ = '2.0.0' +__version__ = '2.0.1' if hasattr(functools.update_wrapper(lambda f: f(), lambda: 42), '__wrapped__'): _update_wrapper = functools.update_wrapper -- cgit v1.2.3 From 27c84aa474d123f275573ede0efaa8609c036ae1 Mon Sep 17 00:00:00 2001 From: Thomas Kemmer Date: Tue, 5 Dec 2017 23:29:25 +0100 Subject: Keep flake8 happy. --- cachetools/abc.py | 1 - 1 file changed, 1 deletion(-) diff --git a/cachetools/abc.py b/cachetools/abc.py index d265f63..b663d96 100644 --- a/cachetools/abc.py +++ b/cachetools/abc.py @@ -1,7 +1,6 @@ from __future__ import absolute_import import collections - from abc import abstractmethod -- cgit v1.2.3 From bb22d4fc07829c58af9c5b6c012c4e49d93c7669 Mon Sep 17 00:00:00 2001 From: Thomas Kemmer Date: Tue, 5 Dec 2017 23:31:48 +0100 Subject: Remove Python 3.3 build. --- .travis.yml | 1 - 1 file changed, 1 deletion(-) diff --git a/.travis.yml b/.travis.yml index d24ada3..c0d952b 100644 --- a/.travis.yml +++ b/.travis.yml @@ -4,7 +4,6 @@ language: python python: - 2.7 -- 3.3 - 3.4 - 3.5 - 3.6 -- cgit v1.2.3 From 2d2a57e49cf52054262f8434d27da82530c8a84e Mon Sep 17 00:00:00 2001 From: Thomas Kemmer Date: Fri, 11 May 2018 18:00:58 +0200 Subject: Fix #83: Improve documentation of replacement strategy. --- docs/index.rst | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/docs/index.rst b/docs/index.rst index fe05736..0a2547e 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -30,9 +30,9 @@ different cache algorithms. All these classes derive from class :class:`Cache`, which in turn derives from :class:`collections.MutableMapping`, and provide :attr:`maxsize` and :attr:`currsize` properties to retrieve the maximum and current size -of the cache. 
When a cache is full, :meth:`setitem` calls -:meth:`popitem` repeatedly until there is enough room for the item to -be added. +of the cache. When a cache is full, :meth:`Cache.__setitem__()` calls +:meth:`self.popitem()` repeatedly until there is enough room for the +item to be added. All cache classes accept an optional `missing` keyword argument in their constructor, which can be used to provide a default *factory -- cgit v1.2.3 From 396edb913fa74447b15fbc1cbb7aaa204550fed2 Mon Sep 17 00:00:00 2001 From: Thomas Kemmer Date: Fri, 11 May 2018 21:58:44 +0200 Subject: Fix #57: Add documentation on how to extend cache classes. --- docs/index.rst | 91 +++++++++++++++++++++++++++++++++++----------------------- 1 file changed, 55 insertions(+), 36 deletions(-) diff --git a/docs/index.rst b/docs/index.rst index 0a2547e..e377604 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -34,34 +34,6 @@ of the cache. When a cache is full, :meth:`Cache.__setitem__()` calls :meth:`self.popitem()` repeatedly until there is enough room for the item to be added. -All cache classes accept an optional `missing` keyword argument in -their constructor, which can be used to provide a default *factory -function*. If the key `key` is not present, the ``cache[key]`` -operation calls :meth:`Cache.__missing__`, which in turn calls -`missing` with `key` as its sole argument. The cache will then store -the object returned from ``missing(key)`` as the new cache value for -`key`, possibly discarding other items if the cache is full. 
This may -be used to provide memoization for existing single-argument functions:: - - from cachetools import LRUCache - import urllib.request - - def get_pep(num): - """Retrieve text of a Python Enhancement Proposal""" - url = 'http://www.python.org/dev/peps/pep-%04d/' % num - with urllib.request.urlopen(url) as s: - return s.read() - - cache = LRUCache(maxsize=4, missing=get_pep) - - for n in 8, 9, 290, 308, 320, 8, 218, 320, 279, 289, 320, 9991: - try: - print(n, len(cache[n])) - except urllib.error.HTTPError: - print(n, 'Not Found') - print(sorted(cache.keys())) - - :class:`Cache` also features a :meth:`getsizeof` method, which returns the size of a given `value`. The default implementation of :meth:`getsizeof` returns :const:`1` irrespective of its argument, @@ -70,6 +42,7 @@ making the cache's size equal to the number of its items, or named constructor parameter `getsizeof`, which may specify a function of one argument used to retrieve the size of an item's value. + .. autoclass:: Cache :members: @@ -78,11 +51,7 @@ of one argument used to retrieve the size of an item's value. to implement specific caching strategies. If a subclass has to keep track of item access, insertion or deletion, it may additionally need to override :meth:`__getitem__`, - :meth:`__setitem__` and :meth:`__delitem__`. If a subclass wants - to store meta data with its values, i.e. the `value` argument - passed to :meth:`Cache.__setitem__` is different from what the - derived class's :meth:`__setitem__` received, it will probably need - to override :meth:`getsizeof`, too. + :meth:`__setitem__` and :meth:`__delitem__`. .. autoclass:: LFUCache :members: @@ -108,8 +77,7 @@ of one argument used to retrieve the size of an item's value. non-empty sequence. .. autoclass:: TTLCache(maxsize, ttl, timer=time.time, missing=None, getsizeof=None) - :members: - :exclude-members: expire + :members: popitem, timer, ttl This class associates a time-to-live value with each item. 
Items that expire because they have exceeded their time-to-live will be @@ -122,7 +90,7 @@ of one argument used to retrieve the size of an item's value. :func:`time.time` function is used to retrieve the current time. A custom `timer` function can be supplied if needed. - .. automethod:: expire(self, time=None) + .. method:: expire(self, time=None) Since expired items will be "physically" removed from a cache only at the next mutating operation, e.g. :meth:`__setitem__` or @@ -135,6 +103,57 @@ of one argument used to retrieve the size of an item's value. expired by the current value returned by :attr:`timer`. +Extending cache classes +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Sometimes it may be desirable to notice when and what cache items are +evicted, i.e. removed from a cache to make room for new items. Since +all cache implementations call :meth:`popitem` to evict items from the +cache, this can be achieved by overriding this method in a subclass:: + + >>> from cachetools import LRUCache + >>> class MyCache(LRUCache): + ... def popitem(self): + ... key, value = super().popitem() + ... print('Key "%s" evicted with value "%s"' % (key, value)) + ... return key, value + ... + >>> c = MyCache(maxsize=2) + >>> c['a'] = 1 + >>> c['b'] = 2 + >>> c['c'] = 3 + Key "a" evicted with value "1" + +Similar to the standard library's :class:`collections.defaultdict`, +subclasses of :class:`Cache` may implement a :meth:`__missing__` +method which is called by :meth:`Cache.__getitem__` if the requested +key is not found:: + + >>> from cachetools import LRUCache + >>> import urllib.request + >>> class PepStore(LRUCache): + ... def __missing__(self, key): + ... """Retrieve text of a Python Enhancement Proposal""" + ... url = 'http://www.python.org/dev/peps/pep-%04d/' % key + ... try: + ... with urllib.request.urlopen(url) as s: + ... pep = s.read() + ... self[key] = pep # store text in cache + ... return pep + ... 
except urllib.error.HTTPError: + ... return 'Not Found' # do not store in cache + >>> peps = PepStore(maxsize=4) + >>> for n in 8, 9, 290, 308, 320, 8, 218, 320, 279, 289, 320, 9991: + ... pep = peps[n] + >>> print(sorted(peps.keys())) + [218, 279, 289, 320] + +Note, though, that such a class does not really behave like a *cache* +any more, and will lead to surprising results when used with any of +the memoizing decorators described below. However, it may be useful +in its own right. + + Memoizing decorators ------------------------------------------------------------------------ -- cgit v1.2.3 From 7bb48e4577a3194dcc8aa4519f173d67fdf45e33 Mon Sep 17 00:00:00 2001 From: Thomas Kemmer Date: Fri, 11 May 2018 22:23:05 +0200 Subject: Fix #69: Better explain when expired TTLCache items are actually removed. --- docs/index.rst | 24 +++++++++++------------- 1 file changed, 11 insertions(+), 13 deletions(-) diff --git a/docs/index.rst b/docs/index.rst index e377604..2bae338 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -81,10 +81,9 @@ of one argument used to retrieve the size of an item's value. This class associates a time-to-live value with each item. Items that expire because they have exceeded their time-to-live will be - removed automatically. If no expired items are there to remove, - the least recently used items will be discarded first to make space - when necessary. Trying to access an expired item will raise a - :exc:`KeyError`. + no longer accessible, and will be removed eventually. If no + expired items are there to remove, the least recently used items + will be discarded first to make space when necessary. By default, the time-to-live is specified in seconds, and the :func:`time.time` function is used to retrieve the current time. A @@ -92,15 +91,14 @@ of one argument used to retrieve the size of an item's value. .. 
method:: expire(self, time=None) - Since expired items will be "physically" removed from a cache - only at the next mutating operation, e.g. :meth:`__setitem__` or - :meth:`__delitem__`, to avoid changing the underlying dictionary - while iterating over it, expired items may still claim memory - although they are no longer accessible. Calling this method - removes all items whose time-to-live would have expired by - `time`, so garbage collection is free to reuse their memory. If - `time` is :const:`None`, this removes all items that have - expired by the current value returned by :attr:`timer`. + Expired items will be removed from a cache only at the next + mutating operation, e.g. :meth:`__setitem__` or + :meth:`__delitem__`, and therefore may still claim memory. + Calling this method removes all items whose time-to-live would + have expired by `time`, so garbage collection is free to reuse + their memory. If `time` is :const:`None`, this removes all + items that have expired by the current value returned by + :attr:`timer`. Extending cache classes -- cgit v1.2.3 From 3c055ca5784e54bd5f08243e46e8a0d045a59dce Mon Sep 17 00:00:00 2001 From: Thomas Kemmer Date: Sat, 12 May 2018 12:00:11 +0200 Subject: Add unit tests for overridden getsizeof() method. 
--- tests/__init__.py | 82 ++++++++++++++++++++++++++++++++---------------- tests/test_cache.py | 5 ++- tests/test_lfu.py | 7 ++--- tests/test_lru.py | 7 ++--- tests/test_rr.py | 14 ++++++--- tests/test_ttl.py | 90 ++++++++++++++++++++++++----------------------------- 6 files changed, 113 insertions(+), 92 deletions(-) diff --git a/tests/__init__.py b/tests/__init__.py index 401dd42..c3dde25 100644 --- a/tests/__init__.py +++ b/tests/__init__.py @@ -1,10 +1,13 @@ class CacheTestMixin(object): - def cache(self, maxsize, missing=None, getsizeof=None): - raise NotImplementedError + Cache = None - def test_cache_defaults(self): - cache = self.cache(maxsize=1) +# def cache(self, maxsize, missing=None, getsizeof=None): +# c = self.Cache(maxsize, missing=missing, getsizeof=getsizeof) +# return c + + def test_defaults(self): + cache = self.Cache(maxsize=1) self.assertEqual(0, len(cache)) self.assertEqual(1, cache.maxsize) self.assertEqual(0, cache.currsize) @@ -13,8 +16,8 @@ class CacheTestMixin(object): self.assertEqual(1, cache.getsizeof(0)) self.assertTrue(repr(cache).startswith(cache.__class__.__name__)) - def test_cache_insert(self): - cache = self.cache(maxsize=2) + def test_insert(self): + cache = self.Cache(maxsize=2) cache.update({1: 1, 2: 2}) self.assertEqual(2, len(cache)) @@ -31,8 +34,8 @@ class CacheTestMixin(object): self.assertEqual(4, cache[4]) self.assertTrue(1 in cache or 2 in cache or 3 in cache) - def test_cache_update(self): - cache = self.cache(maxsize=2) + def test_update(self): + cache = self.Cache(maxsize=2) cache.update({1: 1, 2: 2}) self.assertEqual(2, len(cache)) @@ -49,8 +52,8 @@ class CacheTestMixin(object): self.assertEqual('a', cache[1]) self.assertEqual('b', cache[2]) - def test_cache_delete(self): - cache = self.cache(maxsize=2) + def test_delete(self): + cache = self.Cache(maxsize=2) cache.update({1: 1, 2: 2}) self.assertEqual(2, len(cache)) @@ -73,8 +76,8 @@ class CacheTestMixin(object): self.assertNotIn(1, cache) self.assertNotIn(2, 
cache) - def test_cache_pop(self): - cache = self.cache(maxsize=2) + def test_pop(self): + cache = self.Cache(maxsize=2) cache.update({1: 1, 2: 2}) self.assertEqual(2, cache.pop(2)) @@ -93,8 +96,8 @@ class CacheTestMixin(object): self.assertEqual(None, cache.pop(1, None)) self.assertEqual(None, cache.pop(0, None)) - def test_cache_popitem(self): - cache = self.cache(maxsize=2) + def test_popitem(self): + cache = self.Cache(maxsize=2) cache.update({1: 1, 2: 2}) self.assertIn(cache.pop(1), {1: 1, 2: 2}) @@ -105,9 +108,9 @@ class CacheTestMixin(object): with self.assertRaises(KeyError): cache.popitem() - def test_cache_missing(self): - cache = self.cache(maxsize=2, missing=lambda x: x) - + def _test_missing(self, cache): + self.assertEqual(0, cache.currsize) + self.assertEqual(2, cache.maxsize) self.assertEqual(0, len(cache)) self.assertEqual(1, cache[1]) self.assertEqual(2, cache[2]) @@ -157,8 +160,9 @@ class CacheTestMixin(object): self.assertTrue(1 in cache or 2 in cache) self.assertTrue(1 not in cache or 2 not in cache) - cache = self.cache(maxsize=2, missing=lambda x: x, - getsizeof=lambda x: x) + def _test_missing_getsizeof(self, cache): + self.assertEqual(0, cache.currsize) + self.assertEqual(2, cache.maxsize) self.assertEqual(1, cache[1]) self.assertIn(1, cache) self.assertEqual(2, cache[2]) @@ -169,10 +173,26 @@ class CacheTestMixin(object): self.assertIn(2, cache) self.assertNotIn(3, cache) - def test_cache_getsizeof(self): - cache = self.cache(maxsize=3, getsizeof=lambda x: x) - self.assertEqual(3, cache.maxsize) + def test_missing_param(self): + self._test_missing(self.Cache(maxsize=2, missing=lambda x: x)) + self._test_missing_getsizeof(self.Cache(maxsize=2, missing=lambda x: x, + getsizeof=lambda x: x)) + + def test_missing_subclass(self): + class Cache(self.Cache): + def __missing__(self, key): + try: + self[key] = key + except ValueError: + pass + return key + + self._test_missing(Cache(maxsize=2)) + self._test_missing_getsizeof(Cache(maxsize=2, 
getsizeof=lambda x: x)) + + def _test_getsizeof(self, cache): self.assertEqual(0, cache.currsize) + self.assertEqual(3, cache.maxsize) self.assertEqual(1, cache.getsizeof(1)) self.assertEqual(2, cache.getsizeof(2)) self.assertEqual(3, cache.getsizeof(3)) @@ -214,10 +234,20 @@ class CacheTestMixin(object): self.assertEqual(3, cache.currsize) self.assertEqual(3, cache[3]) - def test_cache_pickle(self): + def test_getsizeof_param(self): + self._test_getsizeof(self.Cache(maxsize=3, getsizeof=lambda x: x)) + + def test_getsizeof_subclass(self): + class Cache(self.Cache): + def getsizeof(self, value): + return value + + self._test_getsizeof(Cache(maxsize=3)) + + def test_pickle(self): import pickle - source = self.cache(maxsize=2) + source = self.Cache(maxsize=2) source.update({1: 1, 2: 2}) cache = pickle.loads(pickle.dumps(source)) @@ -239,13 +269,13 @@ class CacheTestMixin(object): self.assertEqual(cache, pickle.loads(pickle.dumps(cache))) - def test_cache_pickle_maxsize(self): + def test_pickle_maxsize(self): import pickle import sys # test empty cache, single element, large cache (recursion limit) for n in [0, 1, sys.getrecursionlimit() * 2]: - source = self.cache(maxsize=n) + source = self.Cache(maxsize=n) source.update((i, i) for i in range(n)) cache = pickle.loads(pickle.dumps(source)) self.assertEqual(n, len(cache)) diff --git a/tests/test_cache.py b/tests/test_cache.py index 3b78515..ef87877 100644 --- a/tests/test_cache.py +++ b/tests/test_cache.py @@ -1,11 +1,10 @@ import unittest -from cachetools import Cache +import cachetools from . import CacheTestMixin class CacheTest(unittest.TestCase, CacheTestMixin): - def cache(self, maxsize, missing=None, getsizeof=None): - return Cache(maxsize, missing=missing, getsizeof=getsizeof) + Cache = cachetools.Cache diff --git a/tests/test_lfu.py b/tests/test_lfu.py index fe0437e..6679a88 100644 --- a/tests/test_lfu.py +++ b/tests/test_lfu.py @@ -7,11 +7,10 @@ from . 
import CacheTestMixin class LFUCacheTest(unittest.TestCase, CacheTestMixin): - def cache(self, maxsize, missing=None, getsizeof=None): - return LFUCache(maxsize, missing=missing, getsizeof=getsizeof) + Cache = LFUCache def test_lfu(self): - cache = self.cache(maxsize=2) + cache = LFUCache(maxsize=2) cache[1] = 1 cache[1] @@ -29,7 +28,7 @@ class LFUCacheTest(unittest.TestCase, CacheTestMixin): self.assertEqual(cache[1], 1) def test_lfu_getsizeof(self): - cache = self.cache(maxsize=3, getsizeof=lambda x: x) + cache = LFUCache(maxsize=3, getsizeof=lambda x: x) cache[1] = 1 cache[2] = 2 diff --git a/tests/test_lru.py b/tests/test_lru.py index a94a517..fe97803 100644 --- a/tests/test_lru.py +++ b/tests/test_lru.py @@ -7,11 +7,10 @@ from . import CacheTestMixin class LRUCacheTest(unittest.TestCase, CacheTestMixin): - def cache(self, maxsize, missing=None, getsizeof=None): - return LRUCache(maxsize, missing=missing, getsizeof=getsizeof) + Cache = LRUCache def test_lru(self): - cache = self.cache(maxsize=2) + cache = LRUCache(maxsize=2) cache[1] = 1 cache[2] = 2 @@ -36,7 +35,7 @@ class LRUCacheTest(unittest.TestCase, CacheTestMixin): self.assertNotIn(2, cache) def test_lru_getsizeof(self): - cache = self.cache(maxsize=3, getsizeof=lambda x: x) + cache = LRUCache(maxsize=3, getsizeof=lambda x: x) cache[1] = 1 cache[2] = 2 diff --git a/tests/test_rr.py b/tests/test_rr.py index 14a4b07..cff18bf 100644 --- a/tests/test_rr.py +++ b/tests/test_rr.py @@ -11,14 +11,18 @@ def choice(seq): return random.choice(seq) +class RRTestCache(RRCache): + def __init__(self, maxsize, choice=choice, missing=None, getsizeof=None): + RRCache.__init__(self, maxsize, choice=choice, + missing=missing, getsizeof=getsizeof) + + class RRCacheTest(unittest.TestCase, CacheTestMixin): - def cache(self, maxsize, choice=choice, missing=None, getsizeof=None): - return RRCache(maxsize, choice=choice, missing=missing, - getsizeof=getsizeof) + Cache = RRTestCache - def test_choice(self): - cache = 
self.cache(maxsize=2, choice=min) + def test_rr_choice(self): + cache = RRCache(maxsize=2, choice=min) self.assertEqual(min, cache.choice) cache[1] = 1 diff --git a/tests/test_ttl.py b/tests/test_ttl.py index 0927f5f..e8a086c 100644 --- a/tests/test_ttl.py +++ b/tests/test_ttl.py @@ -19,42 +19,18 @@ class Timer: self.time += 1 -class TTLCacheTest(unittest.TestCase, CacheTestMixin): - - def cache(self, maxsize, ttl=0, missing=None, getsizeof=None): - return TTLCache(maxsize, ttl, timer=Timer(), missing=missing, - getsizeof=getsizeof) +class TTLTestCache(TTLCache): + def __init__(self, maxsize, ttl=0, missing=None, getsizeof=None): + TTLCache.__init__(self, maxsize, ttl=ttl, timer=Timer(), + missing=missing, getsizeof=getsizeof) - def test_lru(self): - cache = self.cache(maxsize=2) - - cache[1] = 1 - cache[2] = 2 - cache[3] = 3 - self.assertEqual(len(cache), 2) - self.assertNotIn(1, cache) - self.assertEqual(cache[2], 2) - self.assertEqual(cache[3], 3) - - cache[2] - cache[4] = 4 - self.assertEqual(len(cache), 2) - self.assertNotIn(1, cache) - self.assertEqual(cache[2], 2) - self.assertNotIn(3, cache) - self.assertEqual(cache[4], 4) +class TTLCacheTest(unittest.TestCase, CacheTestMixin): - cache[5] = 5 - self.assertEqual(len(cache), 2) - self.assertNotIn(1, cache) - self.assertNotIn(2, cache) - self.assertNotIn(3, cache) - self.assertEqual(cache[4], 4) - self.assertEqual(cache[5], 5) + Cache = TTLTestCache def test_ttl(self): - cache = self.cache(maxsize=2, ttl=1) + cache = TTLCache(maxsize=2, ttl=1, timer=Timer()) self.assertEqual(0, cache.timer()) self.assertEqual(1, cache.ttl) @@ -108,8 +84,36 @@ class TTLCacheTest(unittest.TestCase, CacheTestMixin): with self.assertRaises(KeyError): del cache[3] - def test_expire(self): - cache = self.cache(maxsize=3, ttl=2) + def test_ttl_lru(self): + cache = TTLCache(maxsize=2, ttl=0, timer=Timer()) + + cache[1] = 1 + cache[2] = 2 + cache[3] = 3 + + self.assertEqual(len(cache), 2) + self.assertNotIn(1, cache) + 
self.assertEqual(cache[2], 2) + self.assertEqual(cache[3], 3) + + cache[2] + cache[4] = 4 + self.assertEqual(len(cache), 2) + self.assertNotIn(1, cache) + self.assertEqual(cache[2], 2) + self.assertNotIn(3, cache) + self.assertEqual(cache[4], 4) + + cache[5] = 5 + self.assertEqual(len(cache), 2) + self.assertNotIn(1, cache) + self.assertNotIn(2, cache) + self.assertNotIn(3, cache) + self.assertEqual(cache[4], 4) + self.assertEqual(cache[5], 5) + + def test_ttl_expire(self): + cache = TTLCache(maxsize=3, ttl=2, timer=Timer()) with cache.timer as time: self.assertEqual(time, cache.timer()) self.assertEqual(2, cache.ttl) @@ -155,7 +159,7 @@ class TTLCacheTest(unittest.TestCase, CacheTestMixin): self.assertNotIn(2, cache) self.assertNotIn(3, cache) - def test_atomic(self): + def test_ttl_atomic(self): cache = TTLCache(maxsize=1, ttl=1, timer=Timer(auto=True)) cache[1] = 1 self.assertEqual(1, cache[1]) @@ -169,22 +173,8 @@ class TTLCacheTest(unittest.TestCase, CacheTestMixin): cache.clear() self.assertEqual(0, len(cache)) - def test_missing(self): - class DefaultTTLCache(TTLCache): - def __missing__(self, key): - self[key] = key - return key - - cache = DefaultTTLCache(maxsize=1, ttl=1, timer=Timer()) - self.assertEqual(1, cache[1]) - self.assertIn(1, cache) - self.assertNotIn(2, cache) - self.assertEqual(2, cache[2]) - self.assertNotIn(1, cache) - self.assertIn(2, cache) - - def test_tuple_key(self): - cache = self.cache(maxsize=1, ttl=0) + def test_ttl_tuple_key(self): + cache = TTLCache(maxsize=1, ttl=0, timer=Timer()) self.assertEqual(0, cache.ttl) cache[(1, 2, 3)] = 42 -- cgit v1.2.3 From d456411316cca0ea7bf709feee013f5a816d9de6 Mon Sep 17 00:00:00 2001 From: Thomas Kemmer Date: Sat, 12 May 2018 12:06:01 +0200 Subject: Fix #94: Handle overridden getsizeof() method. 
--- cachetools/cache.py | 20 +++++++++----------- 1 file changed, 9 insertions(+), 11 deletions(-) diff --git a/cachetools/cache.py b/cachetools/cache.py index 0852631..bab7ae9 100644 --- a/cachetools/cache.py +++ b/cachetools/cache.py @@ -23,7 +23,8 @@ class Cache(DefaultMapping): if missing: self.__missing = missing if getsizeof: - self.__getsizeof = getsizeof + self.getsizeof = getsizeof + if self.getsizeof is not Cache.getsizeof: self.__size = dict() self.__data = dict() self.__currsize = 0 @@ -81,14 +82,6 @@ class Cache(DefaultMapping): def __len__(self): return len(self.__data) - @staticmethod - def __getsizeof(value): - return 1 - - @staticmethod - def __missing(key): - raise KeyError(key) - @property def maxsize(self): """The maximum size of the cache.""" @@ -99,6 +92,11 @@ class Cache(DefaultMapping): """The current size of the cache.""" return self.__currsize - def getsizeof(self, value): + @staticmethod + def getsizeof(value): """Return the size of a cache element's value.""" - return self.__getsizeof(value) + return 1 + + @staticmethod + def __missing(key): + raise KeyError(key) -- cgit v1.2.3 From 83335b6d9281380ce5268f33fb930d8702814b28 Mon Sep 17 00:00:00 2001 From: Thomas Kemmer Date: Sat, 12 May 2018 12:57:44 +0200 Subject: Fix #95: Fix Python 2.7 RRCache pickling issues. 
--- cachetools/rr.py | 11 ++++++++++- tests/test_rr.py | 16 ++-------------- 2 files changed, 12 insertions(+), 15 deletions(-) diff --git a/cachetools/rr.py b/cachetools/rr.py index 8cd856c..3fa9f12 100644 --- a/cachetools/rr.py +++ b/cachetools/rr.py @@ -5,13 +5,22 @@ import random from .cache import Cache +# random.choice cannot be pickled in Python 2.7 +def _choice(seq): + return random.choice(seq) + + class RRCache(Cache): """Random Replacement (RR) cache implementation.""" def __init__(self, maxsize, choice=random.choice, missing=None, getsizeof=None): Cache.__init__(self, maxsize, missing, getsizeof) - self.__choice = choice + # TODO: use None as default, assing to self.choice directly? + if choice is random.choice: + self.__choice = _choice + else: + self.__choice = choice @property def choice(self): diff --git a/tests/test_rr.py b/tests/test_rr.py index cff18bf..008978b 100644 --- a/tests/test_rr.py +++ b/tests/test_rr.py @@ -1,4 +1,3 @@ -import random import unittest from cachetools import RRCache @@ -6,22 +5,11 @@ from cachetools import RRCache from . import CacheTestMixin -# random.choice cannot be pickled... -def choice(seq): - return random.choice(seq) - - -class RRTestCache(RRCache): - def __init__(self, maxsize, choice=choice, missing=None, getsizeof=None): - RRCache.__init__(self, maxsize, choice=choice, - missing=missing, getsizeof=getsizeof) - - class RRCacheTest(unittest.TestCase, CacheTestMixin): - Cache = RRTestCache + Cache = RRCache - def test_rr_choice(self): + def test_rr(self): cache = RRCache(maxsize=2, choice=min) self.assertEqual(min, cache.choice) -- cgit v1.2.3 From 8297633af984fc24efabef3b5eba0c3324c622f8 Mon Sep 17 00:00:00 2001 From: Thomas Kemmer Date: Sat, 12 May 2018 14:30:56 +0200 Subject: Fix #58: Deprecate 'missing' Cache constructor parameter. 
--- cachetools/cache.py | 14 +++++++++++--- cachetools/lfu.py | 4 ++-- cachetools/lru.py | 4 ++-- cachetools/rr.py | 4 ++-- cachetools/ttl.py | 4 ++-- docs/index.rst | 31 ++++++++++++++++++++++++++++--- tests/__init__.py | 23 ++++++++++++++++------- tests/test_ttl.py | 5 ++--- 8 files changed, 65 insertions(+), 24 deletions(-) diff --git a/cachetools/cache.py b/cachetools/cache.py index bab7ae9..a9a3e57 100644 --- a/cachetools/cache.py +++ b/cachetools/cache.py @@ -1,5 +1,7 @@ from __future__ import absolute_import +from warnings import warn + from .abc import DefaultMapping @@ -14,14 +16,20 @@ class _DefaultSize(object): return 1 +_deprecated = object() + + class Cache(DefaultMapping): """Mutable mapping to serve as a simple cache or cache base class.""" __size = _DefaultSize() - def __init__(self, maxsize, missing=None, getsizeof=None): - if missing: - self.__missing = missing + def __init__(self, maxsize, missing=_deprecated, getsizeof=None): + if missing is not _deprecated: + warn("Cache constructor parameter 'missing' is deprecated", + DeprecationWarning, 3) + if missing: + self.__missing = missing if getsizeof: self.getsizeof = getsizeof if self.getsizeof is not Cache.getsizeof: diff --git a/cachetools/lfu.py b/cachetools/lfu.py index f5817a4..76a264a 100644 --- a/cachetools/lfu.py +++ b/cachetools/lfu.py @@ -2,13 +2,13 @@ from __future__ import absolute_import import collections -from .cache import Cache +from .cache import Cache, _deprecated class LFUCache(Cache): """Least Frequently Used (LFU) cache implementation.""" - def __init__(self, maxsize, missing=None, getsizeof=None): + def __init__(self, maxsize, missing=_deprecated, getsizeof=None): Cache.__init__(self, maxsize, missing, getsizeof) self.__counter = collections.Counter() diff --git a/cachetools/lru.py b/cachetools/lru.py index b945797..991b0e2 100644 --- a/cachetools/lru.py +++ b/cachetools/lru.py @@ -2,13 +2,13 @@ from __future__ import absolute_import import collections -from .cache import 
Cache +from .cache import Cache, _deprecated class LRUCache(Cache): """Least Recently Used (LRU) cache implementation.""" - def __init__(self, maxsize, missing=None, getsizeof=None): + def __init__(self, maxsize, missing=_deprecated, getsizeof=None): Cache.__init__(self, maxsize, missing, getsizeof) self.__order = collections.OrderedDict() diff --git a/cachetools/rr.py b/cachetools/rr.py index 3fa9f12..1aeed43 100644 --- a/cachetools/rr.py +++ b/cachetools/rr.py @@ -2,7 +2,7 @@ from __future__ import absolute_import import random -from .cache import Cache +from .cache import Cache, _deprecated # random.choice cannot be pickled in Python 2.7 @@ -13,7 +13,7 @@ def _choice(seq): class RRCache(Cache): """Random Replacement (RR) cache implementation.""" - def __init__(self, maxsize, choice=random.choice, missing=None, + def __init__(self, maxsize, choice=random.choice, missing=_deprecated, getsizeof=None): Cache.__init__(self, maxsize, missing, getsizeof) # TODO: use None as default, assing to self.choice directly? diff --git a/cachetools/ttl.py b/cachetools/ttl.py index d20cc0b..d4c3b37 100644 --- a/cachetools/ttl.py +++ b/cachetools/ttl.py @@ -3,7 +3,7 @@ from __future__ import absolute_import import collections import time -from .cache import Cache +from .cache import Cache, _deprecated class _Link(object): @@ -57,7 +57,7 @@ class _Timer(object): class TTLCache(Cache): """LRU Cache implementation with per-item time-to-live (TTL) value.""" - def __init__(self, maxsize, ttl, timer=time.time, missing=None, + def __init__(self, maxsize, ttl, timer=time.time, missing=_deprecated, getsizeof=None): Cache.__init__(self, maxsize, missing, getsizeof) self.__root = root = _Link() diff --git a/docs/index.rst b/docs/index.rst index 2bae338..ac3b053 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -43,7 +43,7 @@ named constructor parameter `getsizeof`, which may specify a function of one argument used to retrieve the size of an item's value. -.. autoclass:: Cache +.. 
autoclass:: Cache(maxsize, missing=None, getsizeof=None) :members: This class discards arbitrary items using :meth:`popitem` to make @@ -53,18 +53,33 @@ of one argument used to retrieve the size of an item's value. additionally need to override :meth:`__getitem__`, :meth:`__setitem__` and :meth:`__delitem__`. -.. autoclass:: LFUCache + .. deprecated:: 2.1 + + The `missing` argument. Override :meth:`__missing__` in a + subclass instead. + +.. autoclass:: LFUCache(maxsize, missing=None, getsizeof=None) :members: This class counts how often an item is retrieved, and discards the items used least often to make space when necessary. -.. autoclass:: LRUCache + .. deprecated:: 2.1 + + The `missing` argument. Override :meth:`__missing__` in a + subclass instead. + +.. autoclass:: LRUCache(maxsize, missing=None, getsizeof=None) :members: This class discards the least recently used items first to make space when necessary. + .. deprecated:: 2.1 + + The `missing` argument. Override :meth:`__missing__` in a + subclass instead. + .. autoclass:: RRCache(maxsize, choice=random.choice, missing=None, getsizeof=None) :members: @@ -76,6 +91,11 @@ of one argument used to retrieve the size of an item's value. an alternative function that returns an arbitrary element from a non-empty sequence. + .. deprecated:: 2.1 + + The `missing` argument. Override :meth:`__missing__` in a + subclass instead. + .. autoclass:: TTLCache(maxsize, ttl, timer=time.time, missing=None, getsizeof=None) :members: popitem, timer, ttl @@ -89,6 +109,11 @@ of one argument used to retrieve the size of an item's value. :func:`time.time` function is used to retrieve the current time. A custom `timer` function can be supplied if needed. + .. deprecated:: 2.1 + + The `missing` argument. Override :meth:`__missing__` in a + subclass instead. + .. 
method:: expire(self, time=None) Expired items will be removed from a cache only at the next diff --git a/tests/__init__.py b/tests/__init__.py index c3dde25..f1d69f9 100644 --- a/tests/__init__.py +++ b/tests/__init__.py @@ -1,11 +1,10 @@ +import warnings + + class CacheTestMixin(object): Cache = None -# def cache(self, maxsize, missing=None, getsizeof=None): -# c = self.Cache(maxsize, missing=missing, getsizeof=getsizeof) -# return c - def test_defaults(self): cache = self.Cache(maxsize=1) self.assertEqual(0, len(cache)) @@ -174,9 +173,19 @@ class CacheTestMixin(object): self.assertNotIn(3, cache) def test_missing_param(self): - self._test_missing(self.Cache(maxsize=2, missing=lambda x: x)) - self._test_missing_getsizeof(self.Cache(maxsize=2, missing=lambda x: x, - getsizeof=lambda x: x)) + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter('always') + c = self.Cache(2, missing=lambda x: x) + self.assertEqual(len(w), 1) + self.assertIs(w[0].category, DeprecationWarning) + self._test_missing(c) + + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter('always') + c = self.Cache(2, missing=lambda x: x, getsizeof=lambda x: x) + self.assertEqual(len(w), 1) + self.assertIs(w[0].category, DeprecationWarning) + self._test_missing_getsizeof(c) def test_missing_subclass(self): class Cache(self.Cache): diff --git a/tests/test_ttl.py b/tests/test_ttl.py index e8a086c..f677c9b 100644 --- a/tests/test_ttl.py +++ b/tests/test_ttl.py @@ -20,9 +20,8 @@ class Timer: class TTLTestCache(TTLCache): - def __init__(self, maxsize, ttl=0, missing=None, getsizeof=None): - TTLCache.__init__(self, maxsize, ttl=ttl, timer=Timer(), - missing=missing, getsizeof=getsizeof) + def __init__(self, maxsize, ttl=0, **kwargs): + TTLCache.__init__(self, maxsize, ttl=ttl, timer=Timer(), **kwargs) class TTLCacheTest(unittest.TestCase, CacheTestMixin): -- cgit v1.2.3 From 45d9b0da1b044bb6a7e1722d0942ed9e3b4fb78b Mon Sep 17 00:00:00 2001 From: Thomas Kemmer Date: 
Sat, 12 May 2018 14:41:46 +0200 Subject: Prepare v2.1.0. --- CHANGES.rst | 12 ++++++++++++ LICENSE | 2 +- README.rst | 2 +- cachetools/__init__.py | 2 +- docs/conf.py | 2 +- 5 files changed, 16 insertions(+), 4 deletions(-) diff --git a/CHANGES.rst b/CHANGES.rst index bf6b13c..2e31145 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -1,3 +1,15 @@ +v2.1.0 (UNRELEASED) +------------------- + +- Deprecate ``missing`` cache constructor parameter. + +- Handle overridden ``getsizeof()`` method in subclasses. + +- Fix Python 2.7 ``RRCache`` pickling issues. + +- Various documentation improvements. + + v2.0.1 (2017-08-11) ------------------- diff --git a/LICENSE b/LICENSE index a424aa2..9faa2f0 100644 --- a/LICENSE +++ b/LICENSE @@ -1,6 +1,6 @@ The MIT License (MIT) -Copyright (c) 2014-2017 Thomas Kemmer +Copyright (c) 2014-2018 Thomas Kemmer Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in diff --git a/README.rst b/README.rst index b53399c..125e781 100644 --- a/README.rst +++ b/README.rst @@ -73,7 +73,7 @@ Project Resources License ------------------------------------------------------------------------ -Copyright (c) 2014-2017 Thomas Kemmer. +Copyright (c) 2014-2018 Thomas Kemmer. Licensed under the `MIT License`_. 
diff --git a/cachetools/__init__.py b/cachetools/__init__.py index 54baa46..ca67c58 100644 --- a/cachetools/__init__.py +++ b/cachetools/__init__.py @@ -16,7 +16,7 @@ __all__ = ( 'cached', 'cachedmethod' ) -__version__ = '2.0.1' +__version__ = '2.1.0' if hasattr(functools.update_wrapper(lambda f: f(), lambda: 42), '__wrapped__'): _update_wrapper = functools.update_wrapper diff --git a/docs/conf.py b/docs/conf.py index 1b04038..9a64411 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -6,7 +6,7 @@ def get_version(filename): project = 'cachetools' -copyright = '2014-2017 Thomas Kemmer' +copyright = '2014-2018 Thomas Kemmer' version = get_version(b'../cachetools/__init__.py') release = version -- cgit v1.2.3 From 36d864e9584224a6b336654f46b4032994e9b507 Mon Sep 17 00:00:00 2001 From: Thomas Kemmer Date: Sat, 12 May 2018 18:21:47 +0200 Subject: Set release date. --- CHANGES.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGES.rst b/CHANGES.rst index 2e31145..53b4fd4 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -1,4 +1,4 @@ -v2.1.0 (UNRELEASED) +v2.1.0 (2018-05-12) ------------------- - Deprecate ``missing`` cache constructor parameter. -- cgit v1.2.3 From 91ad85bc3074962dcd98c811429cf058eeecdcb1 Mon Sep 17 00:00:00 2001 From: Sergey B Kirpichev Date: Tue, 12 Jun 2018 21:48:44 +0300 Subject: Test on PyPy Python 3 implementation weakref test was adapted to explicitly call gc.collect(), other tests pass without any modifications. Also add supported Python implementations to classifiers.
--- .travis.yml | 1 + setup.py | 2 ++ tests/test_method.py | 2 ++ 3 files changed, 5 insertions(+) diff --git a/.travis.yml b/.travis.yml index c0d952b..901134e 100644 --- a/.travis.yml +++ b/.travis.yml @@ -7,6 +7,7 @@ python: - 3.4 - 3.5 - 3.6 +- pypy3.5 install: - pip install coveralls tox diff --git a/setup.py b/setup.py index 57fa425..3427cb3 100644 --- a/setup.py +++ b/setup.py @@ -33,6 +33,8 @@ setup( 'Programming Language :: Python :: 3.4', 'Programming Language :: Python :: 3.5', 'Programming Language :: Python :: 3.6', + 'Programming Language :: Python :: Implementation :: CPython', + 'Programming Language :: Python :: Implementation :: PyPy', 'Topic :: Software Development :: Libraries :: Python Modules' ] ) diff --git a/tests/test_method.py b/tests/test_method.py index db810b6..2095970 100644 --- a/tests/test_method.py +++ b/tests/test_method.py @@ -109,6 +109,7 @@ class CachedMethodTest(unittest.TestCase): def test_weakref(self): import weakref import fractions + import gc # in Python 3.4, `int` does not support weak references even # when subclassed, but Fraction apparently does... @@ -119,6 +120,7 @@ class CachedMethodTest(unittest.TestCase): cached = Cached(weakref.WeakValueDictionary(), count=Int(0)) self.assertEqual(cached.get(0), 0) + gc.collect() self.assertEqual(cached.get(0), 1) ref = cached.get(1) -- cgit v1.2.3 From 211a498069d676b5aa12f83868590d73e16e4726 Mon Sep 17 00:00:00 2001 From: Sergey B Kirpichev Date: Tue, 12 Jun 2018 21:56:02 +0300 Subject: Gitignore .pytest_cache/ --- .gitignore | 1 + 1 file changed, 1 insertion(+) diff --git a/.gitignore b/.gitignore index 1a39b30..235e4aa 100644 --- a/.gitignore +++ b/.gitignore @@ -8,3 +8,4 @@ MANIFEST build/ dist/ docs/_build/ +.pytest_cache/ -- cgit v1.2.3 From 44487764050f65ddb0f5e3ffa6fb2fb306397ece Mon Sep 17 00:00:00 2001 From: Thomas Kemmer Date: Thu, 14 Jun 2018 10:15:37 +0200 Subject: Fix #99: Correct "envkey" example. 
--- docs/index.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/index.rst b/docs/index.rst index ac3b053..e5fbf59 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -348,7 +348,7 @@ specially:: def envkey(*args, env={}, **kwargs): key = hashkey(*args, **kwargs) - key += tuple(env.items()) + key += tuple(sorted(env.items())) return key The :func:`envkey` function can then be used in decorator declarations -- cgit v1.2.3 From 9185e065616a520c4e9d26daefe54c842198c313 Mon Sep 17 00:00:00 2001 From: Thomas Kemmer Date: Sat, 27 Oct 2018 22:33:19 +0200 Subject: Fix #110: Officially support Python 3.7. --- .travis.yml | 5 ++++- setup.py | 2 +- 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/.travis.yml b/.travis.yml index 901134e..dbabe75 100644 --- a/.travis.yml +++ b/.travis.yml @@ -1,4 +1,6 @@ -sudo: false +sudo: true + +dist: xenial language: python @@ -7,6 +9,7 @@ python: - 3.4 - 3.5 - 3.6 +- 3.7 - pypy3.5 install: diff --git a/setup.py b/setup.py index 3427cb3..efa87c5 100644 --- a/setup.py +++ b/setup.py @@ -29,10 +29,10 @@ setup( 'Programming Language :: Python :: 2', 'Programming Language :: Python :: 2.7', 'Programming Language :: Python :: 3', - 'Programming Language :: Python :: 3.3', 'Programming Language :: Python :: 3.4', 'Programming Language :: Python :: 3.5', 'Programming Language :: Python :: 3.6', + 'Programming Language :: Python :: 3.7', 'Programming Language :: Python :: Implementation :: CPython', 'Programming Language :: Python :: Implementation :: PyPy', 'Topic :: Software Development :: Libraries :: Python Modules' -- cgit v1.2.3 From 48e00c5f8305de04363dbfaa2fdef64fa6f49c98 Mon Sep 17 00:00:00 2001 From: Thomas Kemmer Date: Sat, 27 Oct 2018 23:09:19 +0200 Subject: Fix #107: Remove 'self' from @cachedmethod key arguments. 
--- cachetools/__init__.py | 4 ++-- tests/test_method.py | 4 ++++ 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/cachetools/__init__.py b/cachetools/__init__.py index ca67c58..d03d014 100644 --- a/cachetools/__init__.py +++ b/cachetools/__init__.py @@ -79,7 +79,7 @@ def cachedmethod(cache, key=keys.hashkey, lock=None): c = cache(self) if c is None: return method(self, *args, **kwargs) - k = key(self, *args, **kwargs) + k = key(*args, **kwargs) try: return c[k] except KeyError: @@ -95,7 +95,7 @@ def cachedmethod(cache, key=keys.hashkey, lock=None): c = cache(self) if c is None: return method(self, *args, **kwargs) - k = key(self, *args, **kwargs) + k = key(*args, **kwargs) try: with lock(self): return c[k] diff --git a/tests/test_method.py b/tests/test_method.py index 2095970..9252fef 100644 --- a/tests/test_method.py +++ b/tests/test_method.py @@ -22,6 +22,10 @@ class Cached(object): self.count += 1 return count + # https://github.com/tkem/cachetools/issues/107 + def __hash__(self): + raise TypeError('unhashable type') + class Locked(object): -- cgit v1.2.3 From 5b85b3f523e92913fd742d35fb409e1caf9cec57 Mon Sep 17 00:00:00 2001 From: Thomas Kemmer Date: Sat, 27 Oct 2018 22:53:14 +0200 Subject: Fix #109: Remove "missing" constructor argument. 
--- cachetools/cache.py | 23 ++--------------------- cachetools/lfu.py | 6 +++--- cachetools/lru.py | 6 +++--- cachetools/rr.py | 7 +++---- cachetools/ttl.py | 7 +++---- docs/index.rst | 35 +++++------------------------------ tests/__init__.py | 18 ------------------ 7 files changed, 19 insertions(+), 83 deletions(-) diff --git a/cachetools/cache.py b/cachetools/cache.py index a9a3e57..5cb8071 100644 --- a/cachetools/cache.py +++ b/cachetools/cache.py @@ -1,7 +1,5 @@ from __future__ import absolute_import -from warnings import warn - from .abc import DefaultMapping @@ -16,20 +14,12 @@ class _DefaultSize(object): return 1 -_deprecated = object() - - class Cache(DefaultMapping): """Mutable mapping to serve as a simple cache or cache base class.""" __size = _DefaultSize() - def __init__(self, maxsize, missing=_deprecated, getsizeof=None): - if missing is not _deprecated: - warn("Cache constructor parameter 'missing' is deprecated", - DeprecationWarning, 3) - if missing: - self.__missing = missing + def __init__(self, maxsize, getsizeof=None): if getsizeof: self.getsizeof = getsizeof if self.getsizeof is not Cache.getsizeof: @@ -77,12 +67,7 @@ class Cache(DefaultMapping): return key in self.__data def __missing__(self, key): - value = self.__missing(key) - try: - self.__setitem__(key, value) - except ValueError: - pass # value too large - return value + raise KeyError(key) def __iter__(self): return iter(self.__data) @@ -104,7 +89,3 @@ class Cache(DefaultMapping): def getsizeof(value): """Return the size of a cache element's value.""" return 1 - - @staticmethod - def __missing(key): - raise KeyError(key) diff --git a/cachetools/lfu.py b/cachetools/lfu.py index 76a264a..4857c4e 100644 --- a/cachetools/lfu.py +++ b/cachetools/lfu.py @@ -2,14 +2,14 @@ from __future__ import absolute_import import collections -from .cache import Cache, _deprecated +from .cache import Cache class LFUCache(Cache): """Least Frequently Used (LFU) cache implementation.""" - def __init__(self, 
maxsize, missing=_deprecated, getsizeof=None): - Cache.__init__(self, maxsize, missing, getsizeof) + def __init__(self, maxsize, getsizeof=None): + Cache.__init__(self, maxsize, getsizeof) self.__counter = collections.Counter() def __getitem__(self, key, cache_getitem=Cache.__getitem__): diff --git a/cachetools/lru.py b/cachetools/lru.py index 991b0e2..44ec4f1 100644 --- a/cachetools/lru.py +++ b/cachetools/lru.py @@ -2,14 +2,14 @@ from __future__ import absolute_import import collections -from .cache import Cache, _deprecated +from .cache import Cache class LRUCache(Cache): """Least Recently Used (LRU) cache implementation.""" - def __init__(self, maxsize, missing=_deprecated, getsizeof=None): - Cache.__init__(self, maxsize, missing, getsizeof) + def __init__(self, maxsize, getsizeof=None): + Cache.__init__(self, maxsize, getsizeof) self.__order = collections.OrderedDict() def __getitem__(self, key, cache_getitem=Cache.__getitem__): diff --git a/cachetools/rr.py b/cachetools/rr.py index 1aeed43..09ff770 100644 --- a/cachetools/rr.py +++ b/cachetools/rr.py @@ -2,7 +2,7 @@ from __future__ import absolute_import import random -from .cache import Cache, _deprecated +from .cache import Cache # random.choice cannot be pickled in Python 2.7 @@ -13,9 +13,8 @@ def _choice(seq): class RRCache(Cache): """Random Replacement (RR) cache implementation.""" - def __init__(self, maxsize, choice=random.choice, missing=_deprecated, - getsizeof=None): - Cache.__init__(self, maxsize, missing, getsizeof) + def __init__(self, maxsize, choice=random.choice, getsizeof=None): + Cache.__init__(self, maxsize, getsizeof) # TODO: use None as default, assing to self.choice directly? 
if choice is random.choice: self.__choice = _choice diff --git a/cachetools/ttl.py b/cachetools/ttl.py index d4c3b37..6884045 100644 --- a/cachetools/ttl.py +++ b/cachetools/ttl.py @@ -3,7 +3,7 @@ from __future__ import absolute_import import collections import time -from .cache import Cache, _deprecated +from .cache import Cache class _Link(object): @@ -57,9 +57,8 @@ class _Timer(object): class TTLCache(Cache): """LRU Cache implementation with per-item time-to-live (TTL) value.""" - def __init__(self, maxsize, ttl, timer=time.time, missing=_deprecated, - getsizeof=None): - Cache.__init__(self, maxsize, missing, getsizeof) + def __init__(self, maxsize, ttl, timer=time.time, getsizeof=None): + Cache.__init__(self, maxsize, getsizeof) self.__root = root = _Link() root.prev = root.next = root self.__links = collections.OrderedDict() diff --git a/docs/index.rst b/docs/index.rst index e5fbf59..e0f928f 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -43,7 +43,7 @@ named constructor parameter `getsizeof`, which may specify a function of one argument used to retrieve the size of an item's value. -.. autoclass:: Cache(maxsize, missing=None, getsizeof=None) +.. autoclass:: Cache(maxsize, getsizeof=None) :members: This class discards arbitrary items using :meth:`popitem` to make @@ -53,34 +53,19 @@ of one argument used to retrieve the size of an item's value. additionally need to override :meth:`__getitem__`, :meth:`__setitem__` and :meth:`__delitem__`. - .. deprecated:: 2.1 - - The `missing` argument. Override :meth:`__missing__` in a - subclass instead. - -.. autoclass:: LFUCache(maxsize, missing=None, getsizeof=None) +.. autoclass:: LFUCache(maxsize, getsizeof=None) :members: This class counts how often an item is retrieved, and discards the items used least often to make space when necessary. - .. deprecated:: 2.1 - - The `missing` argument. Override :meth:`__missing__` in a - subclass instead. - -.. autoclass:: LRUCache(maxsize, missing=None, getsizeof=None) +.. 
autoclass:: LRUCache(maxsize, getsizeof=None) :members: This class discards the least recently used items first to make space when necessary. - .. deprecated:: 2.1 - - The `missing` argument. Override :meth:`__missing__` in a - subclass instead. - -.. autoclass:: RRCache(maxsize, choice=random.choice, missing=None, getsizeof=None) +.. autoclass:: RRCache(maxsize, choice=random.choice, getsizeof=None) :members: This class randomly selects candidate items and discards them to @@ -91,12 +76,7 @@ of one argument used to retrieve the size of an item's value. an alternative function that returns an arbitrary element from a non-empty sequence. - .. deprecated:: 2.1 - - The `missing` argument. Override :meth:`__missing__` in a - subclass instead. - -.. autoclass:: TTLCache(maxsize, ttl, timer=time.time, missing=None, getsizeof=None) +.. autoclass:: TTLCache(maxsize, ttl, timer=time.time, getsizeof=None) :members: popitem, timer, ttl This class associates a time-to-live value with each item. Items @@ -109,11 +89,6 @@ of one argument used to retrieve the size of an item's value. :func:`time.time` function is used to retrieve the current time. A custom `timer` function can be supplied if needed. - .. deprecated:: 2.1 - - The `missing` argument. Override :meth:`__missing__` in a - subclass instead. - .. 
method:: expire(self, time=None) Expired items will be removed from a cache only at the next diff --git a/tests/__init__.py b/tests/__init__.py index f1d69f9..0be3f32 100644 --- a/tests/__init__.py +++ b/tests/__init__.py @@ -1,6 +1,3 @@ -import warnings - - class CacheTestMixin(object): Cache = None @@ -172,21 +169,6 @@ class CacheTestMixin(object): self.assertIn(2, cache) self.assertNotIn(3, cache) - def test_missing_param(self): - with warnings.catch_warnings(record=True) as w: - warnings.simplefilter('always') - c = self.Cache(2, missing=lambda x: x) - self.assertEqual(len(w), 1) - self.assertIs(w[0].category, DeprecationWarning) - self._test_missing(c) - - with warnings.catch_warnings(record=True) as w: - warnings.simplefilter('always') - c = self.Cache(2, missing=lambda x: x, getsizeof=lambda x: x) - self.assertEqual(len(w), 1) - self.assertIs(w[0].category, DeprecationWarning) - self._test_missing_getsizeof(c) - def test_missing_subclass(self): class Cache(self.Cache): def __missing__(self, key): -- cgit v1.2.3 From 52f8dbaef64e3e585c78152278523543f685d91a Mon Sep 17 00:00:00 2001 From: Thomas Kemmer Date: Sat, 27 Oct 2018 23:23:51 +0200 Subject: Increase coverage. --- cachetools/func.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cachetools/func.py b/cachetools/func.py index 5a2ce84..9727d0c 100644 --- a/cachetools/func.py +++ b/cachetools/func.py @@ -9,7 +9,7 @@ import time try: from threading import RLock -except ImportError: +except ImportError: # pragma: no cover from dummy_threading import RLock from . import keys -- cgit v1.2.3 From 2cde6ef84274b4226729b6a4f4bad251ab0aa3c4 Mon Sep 17 00:00:00 2001 From: Thomas Kemmer Date: Sat, 3 Nov 2018 17:11:10 +0100 Subject: Fix #68: Use Sphinx doctest builder. 
--- .travis.yml | 2 +- docs/index.rst | 191 +++++++++++++++++++++++++++++++++------------------------ tox.ini | 9 ++- 3 files changed, 119 insertions(+), 83 deletions(-) diff --git a/.travis.yml b/.travis.yml index dbabe75..8f85efd 100644 --- a/.travis.yml +++ b/.travis.yml @@ -16,7 +16,7 @@ install: - pip install coveralls tox script: -- tox -e check-manifest,flake8,py +- tox after_success: - coveralls diff --git a/docs/index.rst b/docs/index.rst index e0f928f..a4652bb 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -22,6 +22,15 @@ implemented, and decorators for easily memoizing function and method calls are provided, too. +.. testsetup:: * + + import operator + from cachetools import cached, cachedmethod, LRUCache + + import mock + urllib = mock.MagicMock() + + Cache implementations ------------------------------------------------------------------------ @@ -107,44 +116,48 @@ Extending cache classes Sometimes it may be desirable to notice when and what cache items are evicted, i.e. removed from a cache to make room for new items. Since all cache implementations call :meth:`popitem` to evict items from the -cache, this can be achieved by overriding this method in a subclass:: - - >>> from cachetools import LRUCache - >>> class MyCache(LRUCache): - ... def popitem(self): - ... key, value = super().popitem() - ... print('Key "%s" evicted with value "%s"' % (key, value)) - ... return key, value - ... - >>> c = MyCache(maxsize=2) - >>> c['a'] = 1 - >>> c['b'] = 2 - >>> c['c'] = 3 - Key "a" evicted with value "1" +cache, this can be achieved by overriding this method in a subclass: + +.. doctest:: + :pyversion: >= 3 + + >>> class MyCache(LRUCache): + ... def popitem(self): + ... key, value = super().popitem() + ... print('Key "%s" evicted with value "%s"' % (key, value)) + ... 
return key, value + + >>> c = MyCache(maxsize=2) + >>> c['a'] = 1 + >>> c['b'] = 2 + >>> c['c'] = 3 + Key "a" evicted with value "1" Similar to the standard library's :class:`collections.defaultdict`, subclasses of :class:`Cache` may implement a :meth:`__missing__` method which is called by :meth:`Cache.__getitem__` if the requested -key is not found:: - - >>> from cachetools import LRUCache - >>> import urllib.request - >>> class PepStore(LRUCache): - ... def __missing__(self, key): - ... """Retrieve text of a Python Enhancement Proposal""" - ... url = 'http://www.python.org/dev/peps/pep-%04d/' % key - ... try: - ... with urllib.request.urlopen(url) as s: - ... pep = s.read() - ... self[key] = pep # store text in cache - ... return pep - ... except urllib.error.HTTPError: - ... return 'Not Found' # do not store in cache - >>> peps = PepStore(maxsize=4) - >>> for n in 8, 9, 290, 308, 320, 8, 218, 320, 279, 289, 320, 9991: - ... pep = peps[n] - >>> print(sorted(peps.keys())) - [218, 279, 289, 320] +key is not found: + +.. doctest:: + :pyversion: >= 3 + + >>> class PepStore(LRUCache): + ... def __missing__(self, key): + ... """Retrieve text of a Python Enhancement Proposal""" + ... url = 'http://www.python.org/dev/peps/pep-%04d/' % key + ... try: + ... with urllib.request.urlopen(url) as s: + ... pep = s.read() + ... self[key] = pep # store text in cache + ... return pep + ... except urllib.error.HTTPError: + ... return 'Not Found' # do not store in cache + + >>> peps = PepStore(maxsize=4) + >>> for n in 8, 9, 290, 308, 320, 8, 218, 320, 279, 289, 320: + ... pep = peps[n] + >>> print(sorted(peps.keys())) + [218, 279, 289, 320] Note, though, that such a class does not really behave like a *cache* any more, and will lead to surprising results when used with any of @@ -157,16 +170,17 @@ Memoizing decorators The :mod:`cachetools` module provides decorators for memoizing function and method calls. 
This can save time when a function is -often called with the same arguments:: +often called with the same arguments: - from cachetools import cached +.. doctest:: - @cached(cache={}) - def fib(n): - return n if n < 2 else fib(n - 1) + fib(n - 2) + >>> @cached(cache={}) + ... def fib(n): + ... 'Compute the nth number in the Fibonacci sequence' + ... return n if n < 2 else fib(n - 1) + fib(n - 2) - for i in range(100): - print('fib(%d) = %d' % (i, fib(i))) + >>> fib(42) + 267914296 .. decorator:: cached(cache, key=cachetools.keys.hashkey, lock=None) @@ -206,44 +220,56 @@ often called with the same arguments:: cache during runtime, the cache should be assigned to a variable. When a `lock` object is used, any access to the cache from outside the function wrapper should also be performed within an appropriate - `with` statement:: + `with` statement: + + .. testcode:: - from threading import RLock - from cachetools import cached, LRUCache + from threading import RLock - cache = LRUCache(maxsize=100) - lock = RLock() + cache = LRUCache(maxsize=32) + lock = RLock() - @cached(cache, lock=lock) - def fib(n): - return n if n < 2 else fib(n - 1) + fib(n - 2) + @cached(cache, lock=lock) + def get_pep(num): + 'Retrieve text of a Python Enhancement Proposal' + url = 'http://www.python.org/dev/peps/pep-%04d/' % num + with urllib.request.urlopen(url) as s: + return s.read() - # make sure access to cache is synchronized - with lock: - cache.clear() + # make sure access to cache is synchronized + with lock: + cache.clear() It is also possible to use a single shared cache object with multiple functions. However, care must be taken that different cache keys are generated for each function, even for identical - function arguments:: + function arguments: - from functools import partial - from cachetools import cached, LRUCache - from cachetools.keys import hashkey + .. 
doctest:: + :options: +ELLIPSIS - cache = LRUCache(maxsize=100) + >>> from cachetools.keys import hashkey + >>> from functools import partial - @cached(cache, key=partial(hashkey, 'fib')) - def fib(n): - return n if n < 2 else fib(n - 1) + fib(n - 2) + >>> # shared cache for integer sequences + >>> numcache = {} - @cached(cache, key=partial(hashkey, 'fac')) - def fac(n): - return 1 if n == 0 else n * fac(n - 1) + >>> # compute Fibonacci numbers + >>> @cached(numcache, key=partial(hashkey, 'fib')) + ... def fib(n): + ... return n if n < 2 else fib(n - 1) + fib(n - 2) - print(fib(42)) - print(fac(42)) - print(cache) + >>> # compute Lucas numbers + >>> @cached(numcache, key=partial(hashkey, 'luc')) + ... def luc(n): + ... return 2 - n if n < 2 else luc(n - 1) + luc(n - 2) + + >>> fib(42) + 267914296 + >>> luc(42) + 599074578 + >>> list(sorted(numcache.items())) + [..., (('fib', 42), 267914296), ..., (('luc', 42), 599074578)] .. decorator:: cachedmethod(cache, key=cachetools.keys.hashkey, lock=None) @@ -265,27 +291,30 @@ often called with the same arguments:: One advantage of :func:`cachedmethod` over the :func:`cached` function decorator is that cache properties such as `maxsize` can - be set at runtime:: + be set at runtime: + + .. 
testcode:: - import operator - import urllib.request + class CachedPEPs(object): - from cachetools import LRUCache, cachedmethod + def __init__(self, cachesize): + self.cache = LRUCache(maxsize=cachesize) - class CachedPEPs(object): + @cachedmethod(operator.attrgetter('cache')) + def get(self, num): + """Retrieve text of a Python Enhancement Proposal""" + url = 'http://www.python.org/dev/peps/pep-%04d/' % num + with urllib.request.urlopen(url) as s: + return s.read() - def __init__(self, cachesize): - self.cache = LRUCache(maxsize=cachesize) + peps = CachedPEPs(cachesize=10) + print("PEP #1: %s" % peps.get(1)) - @cachedmethod(operator.attrgetter('cache')) - def get(self, num): - """Retrieve text of a Python Enhancement Proposal""" - url = 'http://www.python.org/dev/peps/pep-%04d/' % num - with urllib.request.urlopen(url) as s: - return s.read() + .. testoutput:: + :hide: + :options: +ELLIPSIS - peps = CachedPEPs(cachesize=10) - print("PEP #1: %s" % peps.get(1)) + PEP #1: ... :mod:`cachetools.keys` --- Key functions for memoizing decorators diff --git a/tox.ini b/tox.ini index 0bfd943..105ff74 100644 --- a/tox.ini +++ b/tox.ini @@ -1,5 +1,5 @@ [tox] -envlist = check-manifest,docs,flake8,py +envlist = check-manifest,docs,doctest,flake8,py [testenv] deps = @@ -22,6 +22,13 @@ deps = commands = sphinx-build -W -b html -d {envtmpdir}/doctrees docs {envtmpdir}/html +[testenv:doctest] +deps = + mock + sphinx +commands = + sphinx-build -W -b doctest -d {envtmpdir}/doctrees docs {envtmpdir}/doctest + [testenv:flake8] deps = flake8 -- cgit v1.2.3 From a1a6d7c418bddab4461998c71214ac4b4bb5da27 Mon Sep 17 00:00:00 2001 From: Thomas Kemmer Date: Sat, 3 Nov 2018 22:54:23 +0100 Subject: Fix #54: Add note regarding thread safety. 
--- docs/index.rst | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/docs/index.rst b/docs/index.rst index a4652bb..f185d9c 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -51,6 +51,13 @@ making the cache's size equal to the number of its items, or named constructor parameter `getsizeof`, which may specify a function of one argument used to retrieve the size of an item's value. +.. note:: + + Please be aware that :class:`Cache` and its subclasses are *not* + thread-safe. Access to a shared cache from multiple threads must + be properly synchronized, e.g. by using one of the memoizing + decorators with a suitable `lock` object. + .. autoclass:: Cache(maxsize, getsizeof=None) :members: -- cgit v1.2.3 From aa21f86b12c5cc7886c3a0e4d57c30de5352dd0e Mon Sep 17 00:00:00 2001 From: Thomas Kemmer Date: Sun, 4 Nov 2018 14:58:07 +0100 Subject: Fix #113: Add support for maxsize=None in cachetools.func decorators. --- cachetools/func.py | 38 ++++++++++++++++++++++++++++++++++---- docs/index.rst | 12 ++++++------ tests/test_func.py | 13 ++++++++++++- 3 files changed, 52 insertions(+), 11 deletions(-) diff --git a/cachetools/func.py b/cachetools/func.py index 9727d0c..1b0b856 100644 --- a/cachetools/func.py +++ b/cachetools/func.py @@ -26,6 +26,24 @@ _CacheInfo = collections.namedtuple('CacheInfo', [ ]) +class _UnboundCache(dict): + + maxsize = None + + @property + def currsize(self): + return len(self) + + +class _UnboundTTLCache(TTLCache): + def __init__(self, ttl, timer): + TTLCache.__init__(self, float('inf'), ttl, timer) + + @property + def maxsize(self): + return None + + def _cache(cache, typed=False): def decorator(func): key = keys.typedkey if typed else keys.hashkey @@ -77,7 +95,10 @@ def lfu_cache(maxsize=128, typed=False): algorithm. 
""" - return _cache(LFUCache(maxsize), typed) + if maxsize is None: + return _cache(_UnboundCache(), typed) + else: + return _cache(LFUCache(maxsize), typed) def lru_cache(maxsize=128, typed=False): @@ -86,7 +107,10 @@ def lru_cache(maxsize=128, typed=False): algorithm. """ - return _cache(LRUCache(maxsize), typed) + if maxsize is None: + return _cache(_UnboundCache(), typed) + else: + return _cache(LRUCache(maxsize), typed) def rr_cache(maxsize=128, choice=random.choice, typed=False): @@ -95,7 +119,10 @@ def rr_cache(maxsize=128, choice=random.choice, typed=False): algorithm. """ - return _cache(RRCache(maxsize, choice), typed) + if maxsize is None: + return _cache(_UnboundCache(), typed) + else: + return _cache(RRCache(maxsize, choice), typed) def ttl_cache(maxsize=128, ttl=600, timer=time.time, typed=False): @@ -103,4 +130,7 @@ def ttl_cache(maxsize=128, ttl=600, timer=time.time, typed=False): up to `maxsize` results based on a Least Recently Used (LRU) algorithm with a per-item time-to-live (TTL) value. """ - return _cache(TTLCache(maxsize, ttl, timer), typed) + if maxsize is None: + return _cache(_UnboundTTLCache(ttl, timer), typed) + else: + return _cache(TTLCache(maxsize, ttl, timer), typed) diff --git a/docs/index.rst b/docs/index.rst index f185d9c..38259c2 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -58,7 +58,6 @@ of one argument used to retrieve the size of an item's value. be properly synchronized, e.g. by using one of the memoizing decorators with a suitable `lock` object. - .. autoclass:: Cache(maxsize, getsizeof=None) :members: @@ -377,9 +376,9 @@ To ease migration from (or to) Python 3's :func:`functools.lru_cache`, this module provides several memoizing function decorators with a similar API. All these decorators wrap a function with a memoizing callable that saves up to the `maxsize` most recent calls, using -different caching strategies. Note that unlike -:func:`functools.lru_cache`, setting `maxsize` to :const:`None` is not -supported. 
+different caching strategies. If `maxsize` is set to :const:`None`, +the caching strategy is effectively disabled and the cache can grow +without bound. If the optional argument `typed` is set to :const:`True`, function arguments of different types will be cached separately. For example, @@ -388,8 +387,9 @@ distinct results. The wrapped function is instrumented with :func:`cache_info` and :func:`cache_clear` functions to provide information about cache -performance and clear the cache. See the :func:`functools.lru_cache` -documentation for details. +performance and clear the cache. Please see the +:func:`functools.lru_cache` documentation for details. Also note that +all the decorators in this module are thread-safe by default. .. decorator:: lfu_cache(maxsize=128, typed=False) diff --git a/tests/test_func.py b/tests/test_func.py index 1f33246..473d9c9 100644 --- a/tests/test_func.py +++ b/tests/test_func.py @@ -30,7 +30,7 @@ class DecoratorTestMixin(object): self.assertEqual(cached(1), 1) self.assertEqual(cached.cache_info(), (0, 1, 2, 1)) - def test_decorator_nosize(self): + def test_decorator_nocache(self): cached = self.decorator(maxsize=0)(lambda n: n) self.assertEqual(cached.cache_info(), (0, 0, 0, 0)) @@ -41,6 +41,17 @@ class DecoratorTestMixin(object): self.assertEqual(cached(1.0), 1.0) self.assertEqual(cached.cache_info(), (0, 3, 0, 0)) + def test_decorator_unbound(self): + cached = self.decorator(maxsize=None)(lambda n: n) + + self.assertEqual(cached.cache_info(), (0, 0, None, 0)) + self.assertEqual(cached(1), 1) + self.assertEqual(cached.cache_info(), (0, 1, None, 1)) + self.assertEqual(cached(1), 1) + self.assertEqual(cached.cache_info(), (1, 1, None, 1)) + self.assertEqual(cached(1.0), 1.0) + self.assertEqual(cached.cache_info(), (2, 1, None, 1)) + def test_decorator_typed(self): cached = self.decorator(maxsize=2, typed=True)(lambda n: n) -- cgit v1.2.3 From eac1c25ec0f2501e8fd93f31b50d01ed93df3c2c Mon Sep 17 00:00:00 2001 From: Thomas Kemmer Date: 
Sun, 4 Nov 2018 16:11:55 +0100 Subject: Fix #54: Update README examples. --- README.rst | 35 ++++++++++++++++++++--------------- 1 file changed, 20 insertions(+), 15 deletions(-) diff --git a/README.rst b/README.rst index 125e781..49f669f 100644 --- a/README.rst +++ b/README.rst @@ -5,21 +5,26 @@ This module provides various memoizing collections and decorators, including variants of the Python 3 Standard Library `@lru_cache`_ function decorator. -.. code-block:: pycon - - >>> from cachetools import LRUCache - >>> cache = LRUCache(maxsize=2) - >>> cache.update([('first', 1), ('second', 2)]) - >>> cache - LRUCache([('second', 2), ('first', 1)], maxsize=2, currsize=2) - >>> cache['third'] = 3 - >>> cache - LRUCache([('second', 2), ('third', 3)], maxsize=2, currsize=2) - >>> cache['second'] - 2 - >>> cache['fourth'] = 4 - >>> cache - LRUCache([('second', 2), ('fourth', 4)], maxsize=2, currsize=2) +.. code-block:: python + + from cachetools import cached, LRUCache, TTLCache + + # speed up calculating Fibonacci numbers with dynamic programming + @cached(cache={}) + def fib(n): + return n if n < 2 else fib(n - 1) + fib(n - 2) + + # cache least recently used Python Enhancement Proposals + @cached(cache=LRUCache(maxsize=32)) + def get_pep(num): + url = 'http://www.python.org/dev/peps/pep-%04d/' % num + with urllib.request.urlopen(url) as s: + return s.read() + + # cache weather data for no longer than ten minutes + @cached(cache=TTLCache(maxsize=1024, ttl=600)) + def get_weather(place): + return owm.weather_at_place(place).get_weather() For the purpose of this module, a *cache* is a mutable_ mapping_ of a fixed maximum size. When the cache is full, i.e. by adding another -- cgit v1.2.3 From 90d35075eca92d2064bb87ddc1f19f0bf82311c9 Mon Sep 17 00:00:00 2001 From: Thomas Kemmer Date: Sat, 3 Nov 2018 20:26:37 +0100 Subject: Prepare v3.0.0. 
--- CHANGES.rst | 15 +++++++++++++++ cachetools/__init__.py | 2 +- 2 files changed, 16 insertions(+), 1 deletion(-) diff --git a/CHANGES.rst b/CHANGES.rst index 53b4fd4..26351c9 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -1,3 +1,18 @@ +v3.0.0 (2018-11-04) +------------------- + +- Officially support Python 3.7. + +- Drop Python 3.3 support (breaking change). + +- Remove ``missing`` cache constructor parameter (breaking change). + +- Remove ``self`` from ``@cachedmethod`` key arguments (breaking + change). + +- Add support for ``maxsize=None`` in ``cachetools.func`` decorators. + + v2.1.0 (2018-05-12) ------------------- diff --git a/cachetools/__init__.py b/cachetools/__init__.py index d03d014..beb1679 100644 --- a/cachetools/__init__.py +++ b/cachetools/__init__.py @@ -16,7 +16,7 @@ __all__ = ( 'cached', 'cachedmethod' ) -__version__ = '2.1.0' +__version__ = '3.0.0' if hasattr(functools.update_wrapper(lambda f: f(), lambda: 42), '__wrapped__'): _update_wrapper = functools.update_wrapper -- cgit v1.2.3 From df509582fbde6cdf83a2c0fe14b265649a3ba4c7 Mon Sep 17 00:00:00 2001 From: Thomas Kemmer Date: Tue, 29 Jan 2019 20:55:27 +0100 Subject: Bump copyright year. --- LICENSE | 2 +- README.rst | 2 +- docs/conf.py | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/LICENSE b/LICENSE index 9faa2f0..7da84f4 100644 --- a/LICENSE +++ b/LICENSE @@ -1,6 +1,6 @@ The MIT License (MIT) -Copyright (c) 2014-2018 Thomas Kemmer +Copyright (c) 2014-2019 Thomas Kemmer Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in diff --git a/README.rst b/README.rst index 49f669f..96f1634 100644 --- a/README.rst +++ b/README.rst @@ -78,7 +78,7 @@ Project Resources License ------------------------------------------------------------------------ -Copyright (c) 2014-2018 Thomas Kemmer. +Copyright (c) 2014-2019 Thomas Kemmer. Licensed under the `MIT License`_. 
diff --git a/docs/conf.py b/docs/conf.py index 9a64411..3cdb9cf 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -6,7 +6,7 @@ def get_version(filename): project = 'cachetools' -copyright = '2014-2018 Thomas Kemmer' +copyright = '2014-2019 Thomas Kemmer' version = get_version(b'../cachetools/__init__.py') release = version -- cgit v1.2.3 From 00f85990747ffdfb850e2cfbf674b5ac875d8ae2 Mon Sep 17 00:00:00 2001 From: Thomas Kemmer Date: Tue, 29 Jan 2019 20:48:17 +0100 Subject: Fix #118: Improve documentation regarding thread safety. --- docs/index.rst | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/docs/index.rst b/docs/index.rst index 38259c2..b56ecc2 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -53,10 +53,10 @@ of one argument used to retrieve the size of an item's value. .. note:: - Please be aware that :class:`Cache` and its subclasses are *not* - thread-safe. Access to a shared cache from multiple threads must - be properly synchronized, e.g. by using one of the memoizing - decorators with a suitable `lock` object. + Please be aware that all these classes are *not* thread-safe. + Access to a shared cache from multiple threads must be properly + synchronized, e.g. by using one of the memoizing decorators with a + suitable `lock` object. .. autoclass:: Cache(maxsize, getsizeof=None) :members: -- cgit v1.2.3 From 4d1a68523ebc97a2ecf36cc29fdb9718c7dc053f Mon Sep 17 00:00:00 2001 From: Thomas Kemmer Date: Tue, 29 Jan 2019 20:17:58 +0100 Subject: Fix #123: Use time.monotonic as default timer if available. 
--- cachetools/func.py | 8 ++++++-- cachetools/ttl.py | 8 ++++++-- docs/index.rst | 12 +++++++----- 3 files changed, 19 insertions(+), 9 deletions(-) diff --git a/cachetools/func.py b/cachetools/func.py index 1b0b856..8ced5dd 100644 --- a/cachetools/func.py +++ b/cachetools/func.py @@ -5,7 +5,11 @@ from __future__ import absolute_import import collections import functools import random -import time + +try: + from time import monotonic as default_timer +except ImportError: + from time import time as default_timer try: from threading import RLock @@ -125,7 +129,7 @@ def rr_cache(maxsize=128, choice=random.choice, typed=False): return _cache(RRCache(maxsize, choice), typed) -def ttl_cache(maxsize=128, ttl=600, timer=time.time, typed=False): +def ttl_cache(maxsize=128, ttl=600, timer=default_timer, typed=False): """Decorator to wrap a function with a memoizing callable that saves up to `maxsize` results based on a Least Recently Used (LRU) algorithm with a per-item time-to-live (TTL) value. diff --git a/cachetools/ttl.py b/cachetools/ttl.py index 6884045..1edde3a 100644 --- a/cachetools/ttl.py +++ b/cachetools/ttl.py @@ -1,7 +1,11 @@ from __future__ import absolute_import import collections -import time + +try: + from time import monotonic as default_timer +except ImportError: + from time import time as default_timer from .cache import Cache @@ -57,7 +61,7 @@ class _Timer(object): class TTLCache(Cache): """LRU Cache implementation with per-item time-to-live (TTL) value.""" - def __init__(self, maxsize, ttl, timer=time.time, getsizeof=None): + def __init__(self, maxsize, ttl, timer=default_timer, getsizeof=None): Cache.__init__(self, maxsize, getsizeof) self.__root = root = _Link() root.prev = root.next = root diff --git a/docs/index.rst b/docs/index.rst index b56ecc2..e6379f3 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -91,7 +91,7 @@ of one argument used to retrieve the size of an item's value. 
an alternative function that returns an arbitrary element from a non-empty sequence. -.. autoclass:: TTLCache(maxsize, ttl, timer=time.time, getsizeof=None) +.. autoclass:: TTLCache(maxsize, ttl, timer=time.monotonic, getsizeof=None) :members: popitem, timer, ttl This class associates a time-to-live value with each item. Items @@ -100,9 +100,11 @@ of one argument used to retrieve the size of an item's value. expired items are there to remove, the least recently used items will be discarded first to make space when necessary. - By default, the time-to-live is specified in seconds, and the - :func:`time.time` function is used to retrieve the current time. A - custom `timer` function can be supplied if needed. + By default, the time-to-live is specified in seconds and + :func:`time.monotonic` is used to retrieve the current time. If + :func:`time.monotonic` is not available, e.g. when running Python + 2.7, :func:`time.time` will be used. A custom `timer` function can + be supplied if needed. .. method:: expire(self, time=None) @@ -409,7 +411,7 @@ all the decorators in this module are thread-safe by default. saves up to `maxsize` results based on a Random Replacement (RR) algorithm. -.. decorator:: ttl_cache(maxsize=128, ttl=600, timer=time.time, typed=False) +.. decorator:: ttl_cache(maxsize=128, ttl=600, timer=time.monotonic, typed=False) Decorator to wrap a function with a memoizing callable that saves up to `maxsize` results based on a Least Recently Used (LRU) -- cgit v1.2.3 From 9aac551118beb07c98c3004b159d06f171b1d09c Mon Sep 17 00:00:00 2001 From: Thomas Kemmer Date: Sun, 27 Jan 2019 22:42:39 +0100 Subject: Fix #124: Fix Python 3.8 compatibility issue. 
--- cachetools/abc.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/cachetools/abc.py b/cachetools/abc.py index b663d96..3bc43cc 100644 --- a/cachetools/abc.py +++ b/cachetools/abc.py @@ -1,10 +1,14 @@ from __future__ import absolute_import -import collections from abc import abstractmethod +try: + from collections.abc import MutableMapping +except ImportError: + from collections import MutableMapping -class DefaultMapping(collections.MutableMapping): + +class DefaultMapping(MutableMapping): __slots__ = () -- cgit v1.2.3 From 664b45bf4740016b419df7226469f8b6793a6d48 Mon Sep 17 00:00:00 2001 From: Thomas Kemmer Date: Tue, 29 Jan 2019 21:25:27 +0100 Subject: Prepare v3.1.0. --- CHANGES.rst | 10 ++++++++++ cachetools/__init__.py | 2 +- 2 files changed, 11 insertions(+), 1 deletion(-) diff --git a/CHANGES.rst b/CHANGES.rst index 26351c9..f863464 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -1,3 +1,13 @@ +v3.1.0 (UNRELEASED) +------------------- + +- Fix Python 3.8 compatibility issue. + +- Use time.monotonic as default timer if available. + +- Improve documentation regarding thread safety. + + v3.0.0 (2018-11-04) ------------------- diff --git a/cachetools/__init__.py b/cachetools/__init__.py index beb1679..e9a61f7 100644 --- a/cachetools/__init__.py +++ b/cachetools/__init__.py @@ -16,7 +16,7 @@ __all__ = ( 'cached', 'cachedmethod' ) -__version__ = '3.0.0' +__version__ = '3.1.0' if hasattr(functools.update_wrapper(lambda f: f(), lambda: 42), '__wrapped__'): _update_wrapper = functools.update_wrapper -- cgit v1.2.3 From 1b67cddadccb89993e9d2567bac22e57e2b2b373 Mon Sep 17 00:00:00 2001 From: Thomas Kemmer Date: Tue, 29 Jan 2019 21:33:49 +0100 Subject: Release v3.1.0. 
--- CHANGES.rst | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/CHANGES.rst b/CHANGES.rst index f863464..350b977 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -1,9 +1,9 @@ -v3.1.0 (UNRELEASED) +v3.1.0 (2019-01-29) ------------------- - Fix Python 3.8 compatibility issue. -- Use time.monotonic as default timer if available. +- Use ``time.monotonic`` as default timer if available. - Improve documentation regarding thread safety. -- cgit v1.2.3 From 3e60e9431ea881c6372cb3b87bcdd975ef70fdd4 Mon Sep 17 00:00:00 2001 From: Thomas Kemmer Date: Sun, 27 Jan 2019 22:32:08 +0100 Subject: Add Travis 3.8-dev build. --- .travis.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.travis.yml b/.travis.yml index 8f85efd..cfd99e1 100644 --- a/.travis.yml +++ b/.travis.yml @@ -10,6 +10,7 @@ python: - 3.5 - 3.6 - 3.7 +- 3.8-dev - pypy3.5 install: -- cgit v1.2.3 From 748d10de2808bc35b61162930c7adb150cc203e4 Mon Sep 17 00:00:00 2001 From: Thomas Kemmer Date: Fri, 17 May 2019 18:09:42 +0200 Subject: Fix #130: Fix pickling/unpickling of cache keys. --- cachetools/keys.py | 13 ++++++++++++- tests/test_keys.py | 13 +++++++++++++ 2 files changed, 25 insertions(+), 1 deletion(-) diff --git a/cachetools/keys.py b/cachetools/keys.py index adb9dad..25ac0a7 100644 --- a/cachetools/keys.py +++ b/cachetools/keys.py @@ -6,6 +6,12 @@ __all__ = ('hashkey', 'typedkey') class _HashedTuple(tuple): + """A tuple that ensures that hash() will be called no more than once + per element, since cache decorators will hash the key multiple + times on a cache miss. See also _HashedSeq in the standard + library functools implementation. 
+ + """ __hashvalue = None @@ -21,8 +27,13 @@ class _HashedTuple(tuple): def __radd__(self, other, add=tuple.__add__): return _HashedTuple(add(other, self)) + def __getstate__(self): + return {} + -_kwmark = (object(),) +# used for separating keyword arguments; we do not use an object +# instance here so identity is preserved when pickling/unpickling +_kwmark = (_HashedTuple,) def hashkey(*args, **kwargs): diff --git a/tests/test_keys.py b/tests/test_keys.py index 2b9ced6..8137704 100644 --- a/tests/test_keys.py +++ b/tests/test_keys.py @@ -43,3 +43,16 @@ class CacheKeysTest(unittest.TestCase): self.assertIsInstance(key(1, 2, 3) + key(4, 5, 6), type(key())) self.assertIsInstance(key(1, 2, 3) + (4, 5, 6), type(key())) self.assertIsInstance((1, 2, 3) + key(4, 5, 6), type(key())) + + def test_pickle(self, key=cachetools.keys.hashkey): + import pickle + + for k in [key(), key('abc'), key('abc', 123), key('abc', q='abc')]: + # white-box test: assert cached hash value is not pickled + self.assertEqual(len(k.__dict__), 0) + h = hash(k) + self.assertEqual(len(k.__dict__), 1) + pickled = pickle.loads(pickle.dumps(k)) + self.assertEqual(len(pickled.__dict__), 0) + self.assertEqual(k, pickled) + self.assertEqual(h, hash(pickled)) -- cgit v1.2.3 From 15a64a05efb3aca96cfd0439e80d0ba39eec5a0e Mon Sep 17 00:00:00 2001 From: Thomas Kemmer Date: Mon, 20 May 2019 22:46:58 +0200 Subject: Fix #135: Document how to use shared caches with @cachedmethod. --- docs/index.rst | 38 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 38 insertions(+) diff --git a/docs/index.rst b/docs/index.rst index e6379f3..956070a 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -325,6 +325,44 @@ often called with the same arguments: PEP #1: ... + When using a shared cache for multiple methods, be aware that + different cache keys must be created for each method even when + function arguments are the same, just as with the `@cached` + decorator: + + .. 
testcode:: + + class CachedReferences(object): + + def __init__(self, cachesize): + self.cache = LRUCache(maxsize=cachesize) + + @cachedmethod(lambda self: self.cache, key=partial(hashkey, 'pep')) + def get_pep(self, num): + """Retrieve text of a Python Enhancement Proposal""" + url = 'http://www.python.org/dev/peps/pep-%04d/' % num + with urllib.request.urlopen(url) as s: + return s.read() + + @cachedmethod(lambda self: self.cache, key=partial(hashkey, 'rfc')) + def get_rfc(self, num): + """Retrieve text of an IETF Request for Comments""" + url = 'https://tools.ietf.org/rfc/rfc%d.txt' % num + with urllib.request.urlopen(url) as s: + return s.read() + + docs = CachedReferences(cachesize=100) + print("PEP #1: %s" % docs.get_pep(1)) + print("RFC #1: %s" % docs.get_rfc(1)) + + .. testoutput:: + :hide: + :options: +ELLIPSIS + + PEP #1: ... + RFC #1: ... + + :mod:`cachetools.keys` --- Key functions for memoizing decorators ============================================================================ -- cgit v1.2.3 From 695a47e941199f5a915627aa6a7d18e31fa76fa5 Mon Sep 17 00:00:00 2001 From: Thomas Kemmer Date: Thu, 23 May 2019 21:43:36 +0200 Subject: Prepare v3.1.1. --- CHANGES.rst | 8 ++++++++ cachetools/__init__.py | 2 +- 2 files changed, 9 insertions(+), 1 deletion(-) diff --git a/CHANGES.rst b/CHANGES.rst index 350b977..7b9f7b1 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -1,3 +1,11 @@ +v3.1.1 (UNRELEASED) +------------------- + +- Document how to use shared caches with ``@cachedmethod``. 
+ +- Fix pickling/unpickling of cache keys + + v3.1.0 (2019-01-29) ------------------- diff --git a/cachetools/__init__.py b/cachetools/__init__.py index e9a61f7..d95c58d 100644 --- a/cachetools/__init__.py +++ b/cachetools/__init__.py @@ -16,7 +16,7 @@ __all__ = ( 'cached', 'cachedmethod' ) -__version__ = '3.1.0' +__version__ = '3.1.1' if hasattr(functools.update_wrapper(lambda f: f(), lambda: 42), '__wrapped__'): _update_wrapper = functools.update_wrapper -- cgit v1.2.3 From c530924cdec86855be6322d3e4dd979bfc9250e4 Mon Sep 17 00:00:00 2001 From: Thomas Kemmer Date: Thu, 23 May 2019 21:49:13 +0200 Subject: Release v3.1.1. --- CHANGES.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGES.rst b/CHANGES.rst index 7b9f7b1..c1c0bb3 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -1,4 +1,4 @@ -v3.1.1 (UNRELEASED) +v3.1.1 (2019-05-23) ------------------- - Document how to use shared caches with ``@cachedmethod``. -- cgit v1.2.3 From 5bb1f418b0387b8ff078c3140ac9c640d27c6a3e Mon Sep 17 00:00:00 2001 From: Thomas Kemmer Date: Mon, 25 Nov 2019 22:03:39 +0100 Subject: Update Travis environment. --- .travis.yml | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/.travis.yml b/.travis.yml index cfd99e1..ff4ad09 100644 --- a/.travis.yml +++ b/.travis.yml @@ -1,7 +1,3 @@ -sudo: true - -dist: xenial - language: python python: @@ -10,8 +6,8 @@ python: - 3.5 - 3.6 - 3.7 -- 3.8-dev -- pypy3.5 +- 3.8 +- pypy3 install: - pip install coveralls tox -- cgit v1.2.3 From d2020cc7afccc0e3ceee7e99111765cd24c05e39 Mon Sep 17 00:00:00 2001 From: Thomas Kemmer Date: Wed, 11 Dec 2019 07:41:56 +0100 Subject: Drop Python 2 support. 
--- cachetools/__init__.py | 110 ++++------------------------------------------- cachetools/abc.py | 8 +--- cachetools/cache.py | 2 - cachetools/decorators.py | 88 +++++++++++++++++++++++++++++++++++++ cachetools/func.py | 12 +----- cachetools/keys.py | 2 - cachetools/lfu.py | 2 - cachetools/lru.py | 19 +++----- cachetools/rr.py | 2 - cachetools/ttl.py | 24 +++-------- docs/index.rst | 8 ++-- 11 files changed, 114 insertions(+), 163 deletions(-) create mode 100644 cachetools/decorators.py diff --git a/cachetools/__init__.py b/cachetools/__init__.py index d95c58d..95ac6e6 100644 --- a/cachetools/__init__.py +++ b/cachetools/__init__.py @@ -1,112 +1,20 @@ """Extensible memoizing collections and decorators.""" -from __future__ import absolute_import - -import functools - -from . import keys from .cache import Cache +from .decorators import cached, cachedmethod from .lfu import LFUCache from .lru import LRUCache from .rr import RRCache from .ttl import TTLCache __all__ = ( - 'Cache', 'LFUCache', 'LRUCache', 'RRCache', 'TTLCache', - 'cached', 'cachedmethod' + 'Cache', + 'LFUCache', + 'LRUCache', + 'RRCache', + 'TTLCache', + 'cached', + 'cachedmethod' ) -__version__ = '3.1.1' - -if hasattr(functools.update_wrapper(lambda f: f(), lambda: 42), '__wrapped__'): - _update_wrapper = functools.update_wrapper -else: - def _update_wrapper(wrapper, wrapped): - functools.update_wrapper(wrapper, wrapped) - wrapper.__wrapped__ = wrapped - return wrapper - - -def cached(cache, key=keys.hashkey, lock=None): - """Decorator to wrap a function with a memoizing callable that saves - results in a cache. 
- - """ - def decorator(func): - if cache is None: - def wrapper(*args, **kwargs): - return func(*args, **kwargs) - elif lock is None: - def wrapper(*args, **kwargs): - k = key(*args, **kwargs) - try: - return cache[k] - except KeyError: - pass # key not found - v = func(*args, **kwargs) - try: - cache[k] = v - except ValueError: - pass # value too large - return v - else: - def wrapper(*args, **kwargs): - k = key(*args, **kwargs) - try: - with lock: - return cache[k] - except KeyError: - pass # key not found - v = func(*args, **kwargs) - try: - with lock: - cache[k] = v - except ValueError: - pass # value too large - return v - return _update_wrapper(wrapper, func) - return decorator - - -def cachedmethod(cache, key=keys.hashkey, lock=None): - """Decorator to wrap a class or instance method with a memoizing - callable that saves results in a cache. - - """ - def decorator(method): - if lock is None: - def wrapper(self, *args, **kwargs): - c = cache(self) - if c is None: - return method(self, *args, **kwargs) - k = key(*args, **kwargs) - try: - return c[k] - except KeyError: - pass # key not found - v = method(self, *args, **kwargs) - try: - c[k] = v - except ValueError: - pass # value too large - return v - else: - def wrapper(self, *args, **kwargs): - c = cache(self) - if c is None: - return method(self, *args, **kwargs) - k = key(*args, **kwargs) - try: - with lock(self): - return c[k] - except KeyError: - pass # key not found - v = method(self, *args, **kwargs) - try: - with lock(self): - c[k] = v - except ValueError: - pass # value too large - return v - return _update_wrapper(wrapper, method) - return decorator +__version__ = '4.0.0' diff --git a/cachetools/abc.py b/cachetools/abc.py index 3bc43cc..b61e49b 100644 --- a/cachetools/abc.py +++ b/cachetools/abc.py @@ -1,11 +1,5 @@ -from __future__ import absolute_import - from abc import abstractmethod - -try: - from collections.abc import MutableMapping -except ImportError: - from collections import 
MutableMapping +from collections.abc import MutableMapping class DefaultMapping(MutableMapping): diff --git a/cachetools/cache.py b/cachetools/cache.py index 5cb8071..4354ca6 100644 --- a/cachetools/cache.py +++ b/cachetools/cache.py @@ -1,5 +1,3 @@ -from __future__ import absolute_import - from .abc import DefaultMapping diff --git a/cachetools/decorators.py b/cachetools/decorators.py new file mode 100644 index 0000000..cbea9fc --- /dev/null +++ b/cachetools/decorators.py @@ -0,0 +1,88 @@ +import functools + +from .keys import hashkey + + +def cached(cache, key=hashkey, lock=None): + """Decorator to wrap a function with a memoizing callable that saves + results in a cache. + + """ + def decorator(func): + if cache is None: + def wrapper(*args, **kwargs): + return func(*args, **kwargs) + elif lock is None: + def wrapper(*args, **kwargs): + k = key(*args, **kwargs) + try: + return cache[k] + except KeyError: + pass # key not found + v = func(*args, **kwargs) + try: + cache[k] = v + except ValueError: + pass # value too large + return v + else: + def wrapper(*args, **kwargs): + k = key(*args, **kwargs) + try: + with lock: + return cache[k] + except KeyError: + pass # key not found + v = func(*args, **kwargs) + try: + with lock: + cache[k] = v + except ValueError: + pass # value too large + return v + return functools.update_wrapper(wrapper, func) + return decorator + + +def cachedmethod(cache, key=hashkey, lock=None): + """Decorator to wrap a class or instance method with a memoizing + callable that saves results in a cache. 
+ + """ + def decorator(method): + if lock is None: + def wrapper(self, *args, **kwargs): + c = cache(self) + if c is None: + return method(self, *args, **kwargs) + k = key(*args, **kwargs) + try: + return c[k] + except KeyError: + pass # key not found + v = method(self, *args, **kwargs) + try: + c[k] = v + except ValueError: + pass # value too large + return v + else: + def wrapper(self, *args, **kwargs): + c = cache(self) + if c is None: + return method(self, *args, **kwargs) + k = key(*args, **kwargs) + try: + with lock(self): + return c[k] + except KeyError: + pass # key not found + v = method(self, *args, **kwargs) + try: + with lock(self): + c[k] = v + except ValueError: + pass # value too large + return v + return functools.update_wrapper(wrapper, method) + return decorator diff --git a/cachetools/func.py b/cachetools/func.py index 8ced5dd..581877b 100644 --- a/cachetools/func.py +++ b/cachetools/func.py @@ -1,15 +1,9 @@ """`functools.lru_cache` compatible memoizing function decorators.""" -from __future__ import absolute_import - import collections import functools import random - -try: - from time import monotonic as default_timer -except ImportError: - from time import time as default_timer +import time try: from threading import RLock @@ -85,8 +79,6 @@ def _cache(cache, typed=False): pass # value too large return v functools.update_wrapper(wrapper, func) - if not hasattr(wrapper, '__wrapped__'): - wrapper.__wrapped__ = func # Python 2.7 wrapper.cache_info = cache_info wrapper.cache_clear = cache_clear return wrapper @@ -129,7 +121,7 @@ def rr_cache(maxsize=128, choice=random.choice, typed=False): return _cache(RRCache(maxsize, choice), typed) -def ttl_cache(maxsize=128, ttl=600, timer=default_timer, typed=False): +def ttl_cache(maxsize=128, ttl=600, timer=time.monotonic, typed=False): """Decorator to wrap a function with a memoizing callable that saves up to `maxsize` results based on a Least Recently Used (LRU) algorithm with a per-item time-to-live 
(TTL) value. diff --git a/cachetools/keys.py b/cachetools/keys.py index 25ac0a7..355d742 100644 --- a/cachetools/keys.py +++ b/cachetools/keys.py @@ -1,7 +1,5 @@ """Key functions for memoizing decorators.""" -from __future__ import absolute_import - __all__ = ('hashkey', 'typedkey') diff --git a/cachetools/lfu.py b/cachetools/lfu.py index 4857c4e..341df01 100644 --- a/cachetools/lfu.py +++ b/cachetools/lfu.py @@ -1,5 +1,3 @@ -from __future__ import absolute_import - import collections from .cache import Cache diff --git a/cachetools/lru.py b/cachetools/lru.py index 44ec4f1..b72b995 100644 --- a/cachetools/lru.py +++ b/cachetools/lru.py @@ -1,5 +1,3 @@ -from __future__ import absolute_import - import collections from .cache import Cache @@ -34,15 +32,8 @@ class LRUCache(Cache): else: return (key, self.pop(key)) - if hasattr(collections.OrderedDict, 'move_to_end'): - def __update(self, key): - try: - self.__order.move_to_end(key) - except KeyError: - self.__order[key] = None - else: - def __update(self, key): - try: - self.__order[key] = self.__order.pop(key) - except KeyError: - self.__order[key] = None + def __update(self, key): + try: + self.__order.move_to_end(key) + except KeyError: + self.__order[key] = None diff --git a/cachetools/rr.py b/cachetools/rr.py index 09ff770..88ac07c 100644 --- a/cachetools/rr.py +++ b/cachetools/rr.py @@ -1,5 +1,3 @@ -from __future__ import absolute_import - import random from .cache import Cache diff --git a/cachetools/ttl.py b/cachetools/ttl.py index 1edde3a..3bac52e 100644 --- a/cachetools/ttl.py +++ b/cachetools/ttl.py @@ -1,11 +1,5 @@ -from __future__ import absolute_import - import collections - -try: - from time import monotonic as default_timer -except ImportError: - from time import time as default_timer +import time from .cache import Cache @@ -61,7 +55,7 @@ class _Timer(object): class TTLCache(Cache): """LRU Cache implementation with per-item time-to-live (TTL) value.""" - def __init__(self, maxsize, ttl, 
timer=default_timer, getsizeof=None): + def __init__(self, maxsize, ttl, timer=time.monotonic, getsizeof=None): Cache.__init__(self, maxsize, getsizeof) self.__root = root = _Link() root.prev = root.next = root @@ -208,13 +202,7 @@ class TTLCache(Cache): else: return (key, self.pop(key)) - if hasattr(collections.OrderedDict, 'move_to_end'): - def __getlink(self, key): - value = self.__links[key] - self.__links.move_to_end(key) - return value - else: - def __getlink(self, key): - value = self.__links.pop(key) - self.__links[key] = value - return value + def __getlink(self, key): + value = self.__links[key] + self.__links.move_to_end(key) + return value diff --git a/docs/index.rst b/docs/index.rst index 956070a..a7988c9 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -4,7 +4,7 @@ .. module:: cachetools This module provides various memoizing collections and decorators, -including variants of the Python 3 Standard Library `@lru_cache`_ +including variants of the Python Standard Library's `@lru_cache`_ function decorator. For the purpose of this module, a *cache* is a mutable_ mapping_ of a @@ -101,10 +101,8 @@ of one argument used to retrieve the size of an item's value. will be discarded first to make space when necessary. By default, the time-to-live is specified in seconds and - :func:`time.monotonic` is used to retrieve the current time. If - :func:`time.monotonic` is not available, e.g. when running Python - 2.7, :func:`time.time` will be used. A custom `timer` function can - be supplied if needed. + :func:`time.monotonic` is used to retrieve the current time. A + custom `timer` function can be supplied if needed. .. method:: expire(self, time=None) -- cgit v1.2.3 From aa936f0c2fa997dd648bdc24ec767c57e78482e7 Mon Sep 17 00:00:00 2001 From: Thomas Kemmer Date: Sun, 15 Dec 2019 20:46:13 +0100 Subject: Prepare v4.0.0. 
--- .travis.yml | 2 - CHANGELOG.rst | 299 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++ CHANGES.rst | 293 ------------------------------------------------------- MANIFEST.in | 2 +- README.rst | 60 ++++++------ docs/conf.py | 15 +-- docs/index.rst | 17 ++-- setup.cfg | 41 ++++++-- setup.py | 41 +------- tox.ini | 1 - 10 files changed, 387 insertions(+), 384 deletions(-) create mode 100644 CHANGELOG.rst delete mode 100644 CHANGES.rst diff --git a/.travis.yml b/.travis.yml index ff4ad09..c466ad8 100644 --- a/.travis.yml +++ b/.travis.yml @@ -1,8 +1,6 @@ language: python python: -- 2.7 -- 3.4 - 3.5 - 3.6 - 3.7 diff --git a/CHANGELOG.rst b/CHANGELOG.rst new file mode 100644 index 0000000..0a55f62 --- /dev/null +++ b/CHANGELOG.rst @@ -0,0 +1,299 @@ +v4.0.0 (UNRELEASED) +=================== + +- Require Python 3.5 or later. + + +v3.1.1 (2019-05-23) +=================== + +- Document how to use shared caches with ``@cachedmethod``. + +- Fix pickling/unpickling of cache keys + + +v3.1.0 (2019-01-29) +=================== + +- Fix Python 3.8 compatibility issue. + +- Use ``time.monotonic`` as default timer if available. + +- Improve documentation regarding thread safety. + + +v3.0.0 (2018-11-04) +=================== + +- Officially support Python 3.7. + +- Drop Python 3.3 support (breaking change). + +- Remove ``missing`` cache constructor parameter (breaking change). + +- Remove ``self`` from ``@cachedmethod`` key arguments (breaking + change). + +- Add support for ``maxsize=None`` in ``cachetools.func`` decorators. + + +v2.1.0 (2018-05-12) +=================== + +- Deprecate ``missing`` cache constructor parameter. + +- Handle overridden ``getsizeof()`` method in subclasses. + +- Fix Python 2.7 ``RRCache`` pickling issues. + +- Various documentation improvements. + + +v2.0.1 (2017-08-11) +=================== + +- Officially support Python 3.6. + +- Move documentation to RTD. + +- Documentation: Update import paths for key functions (courtesy of + slavkoja). 
+ + +v2.0.0 (2016-10-03) +=================== + +- Drop Python 3.2 support (breaking change). + +- Drop support for deprecated features (breaking change). + +- Move key functions to separate package (breaking change). + +- Accept non-integer ``maxsize`` in ``Cache.__repr__()``. + + +v1.1.6 (2016-04-01) +=================== + +- Reimplement ``LRUCache`` and ``TTLCache`` using + ``collections.OrderedDict``. Note that this will break pickle + compatibility with previous versions. + +- Fix ``TTLCache`` not calling ``__missing__()`` of derived classes. + +- Handle ``ValueError`` in ``Cache.__missing__()`` for consistency + with caching decorators. + +- Improve how ``TTLCache`` handles expired items. + +- Use ``Counter.most_common()`` for ``LFUCache.popitem()``. + + +v1.1.5 (2015-10-25) +=================== + +- Refactor ``Cache`` base class. Note that this will break pickle + compatibility with previous versions. + +- Clean up ``LRUCache`` and ``TTLCache`` implementations. + + +v1.1.4 (2015-10-24) +=================== + +- Refactor ``LRUCache`` and ``TTLCache`` implementations. Note that + this will break pickle compatibility with previous versions. + +- Document pending removal of deprecated features. + +- Minor documentation improvements. + + +v1.1.3 (2015-09-15) +=================== + +- Fix pickle tests. + + +v1.1.2 (2015-09-15) +=================== + +- Fix pickling of large ``LRUCache`` and ``TTLCache`` instances. + + +v1.1.1 (2015-09-07) +=================== + +- Improve key functions. + +- Improve documentation. + +- Improve unit test coverage. + + +v1.1.0 (2015-08-28) +=================== + +- Add ``@cached`` function decorator. + +- Add ``hashkey`` and ``typedkey`` fuctions. + +- Add `key` and `lock` arguments to ``@cachedmethod``. + +- Set ``__wrapped__`` attributes for Python versions < 3.2. + +- Move ``functools`` compatible decorators to ``cachetools.func``. + +- Deprecate ``@cachedmethod`` `typed` argument. 
+ +- Deprecate `cache` attribute for ``@cachedmethod`` wrappers. + +- Deprecate `getsizeof` and `lock` arguments for `cachetools.func` + decorator. + + +v1.0.3 (2015-06-26) +=================== + +- Clear cache statistics when calling ``clear_cache()``. + + +v1.0.2 (2015-06-18) +=================== + +- Allow simple cache instances to be pickled. + +- Refactor ``Cache.getsizeof`` and ``Cache.missing`` default + implementation. + + +v1.0.1 (2015-06-06) +=================== + +- Code cleanup for improved PEP 8 conformance. + +- Add documentation and unit tests for using ``@cachedmethod`` with + generic mutable mappings. + +- Improve documentation. + + +v1.0.0 (2014-12-19) +=================== + +- Provide ``RRCache.choice`` property. + +- Improve documentation. + + +v0.8.2 (2014-12-15) +=================== + +- Use a ``NestedTimer`` for ``TTLCache``. + + +v0.8.1 (2014-12-07) +=================== + +- Deprecate ``Cache.getsize()``. + + +v0.8.0 (2014-12-03) +=================== + +- Ignore ``ValueError`` raised on cache insertion in decorators. + +- Add ``Cache.getsize()``. + +- Add ``Cache.__missing__()``. + +- Feature freeze for `v1.0`. + + +v0.7.1 (2014-11-22) +=================== + +- Fix `MANIFEST.in`. + + +v0.7.0 (2014-11-12) +=================== + +- Deprecate ``TTLCache.ExpiredError``. + +- Add `choice` argument to ``RRCache`` constructor. + +- Refactor ``LFUCache``, ``LRUCache`` and ``TTLCache``. + +- Use custom ``NullContext`` implementation for unsynchronized + function decorators. + + +v0.6.0 (2014-10-13) +=================== + +- Raise ``TTLCache.ExpiredError`` for expired ``TTLCache`` items. + +- Support unsynchronized function decorators. + +- Allow ``@cachedmethod.cache()`` to return None + + +v0.5.1 (2014-09-25) +=================== + +- No formatting of ``KeyError`` arguments. + +- Update ``README.rst``. + + +v0.5.0 (2014-09-23) +=================== + +- Do not delete expired items in TTLCache.__getitem__(). + +- Add ``@ttl_cache`` function decorator. 
+ +- Fix public ``getsizeof()`` usage. + + +v0.4.0 (2014-06-16) +=================== + +- Add ``TTLCache``. + +- Add ``Cache`` base class. + +- Remove ``@cachedmethod`` `lock` parameter. + + +v0.3.1 (2014-05-07) +=================== + +- Add proper locking for ``cache_clear()`` and ``cache_info()``. + +- Report `size` in ``cache_info()``. + + +v0.3.0 (2014-05-06) +=================== + +- Remove ``@cache`` decorator. + +- Add ``size``, ``getsizeof`` members. + +- Add ``@cachedmethod`` decorator. + + +v0.2.0 (2014-04-02) +=================== + +- Add ``@cache`` decorator. + +- Update documentation. + + +v0.1.0 (2014-03-27) +=================== + +- Initial release. diff --git a/CHANGES.rst b/CHANGES.rst deleted file mode 100644 index c1c0bb3..0000000 --- a/CHANGES.rst +++ /dev/null @@ -1,293 +0,0 @@ -v3.1.1 (2019-05-23) -------------------- - -- Document how to use shared caches with ``@cachedmethod``. - -- Fix pickling/unpickling of cache keys - - -v3.1.0 (2019-01-29) -------------------- - -- Fix Python 3.8 compatibility issue. - -- Use ``time.monotonic`` as default timer if available. - -- Improve documentation regarding thread safety. - - -v3.0.0 (2018-11-04) -------------------- - -- Officially support Python 3.7. - -- Drop Python 3.3 support (breaking change). - -- Remove ``missing`` cache constructor parameter (breaking change). - -- Remove ``self`` from ``@cachedmethod`` key arguments (breaking - change). - -- Add support for ``maxsize=None`` in ``cachetools.func`` decorators. - - -v2.1.0 (2018-05-12) -------------------- - -- Deprecate ``missing`` cache constructor parameter. - -- Handle overridden ``getsizeof()`` method in subclasses. - -- Fix Python 2.7 ``RRCache`` pickling issues. - -- Various documentation improvements. - - -v2.0.1 (2017-08-11) -------------------- - -- Officially support Python 3.6. - -- Move documentation to RTD. - -- Documentation: Update import paths for key functions (courtesy of - slavkoja). 
- - -v2.0.0 (2016-10-03) -------------------- - -- Drop Python 3.2 support (breaking change). - -- Drop support for deprecated features (breaking change). - -- Move key functions to separate package (breaking change). - -- Accept non-integer ``maxsize`` in ``Cache.__repr__()``. - - -v1.1.6 (2016-04-01) -------------------- - -- Reimplement ``LRUCache`` and ``TTLCache`` using - ``collections.OrderedDict``. Note that this will break pickle - compatibility with previous versions. - -- Fix ``TTLCache`` not calling ``__missing__()`` of derived classes. - -- Handle ``ValueError`` in ``Cache.__missing__()`` for consistency - with caching decorators. - -- Improve how ``TTLCache`` handles expired items. - -- Use ``Counter.most_common()`` for ``LFUCache.popitem()``. - - -v1.1.5 (2015-10-25) -------------------- - -- Refactor ``Cache`` base class. Note that this will break pickle - compatibility with previous versions. - -- Clean up ``LRUCache`` and ``TTLCache`` implementations. - - -v1.1.4 (2015-10-24) -------------------- - -- Refactor ``LRUCache`` and ``TTLCache`` implementations. Note that - this will break pickle compatibility with previous versions. - -- Document pending removal of deprecated features. - -- Minor documentation improvements. - - -v1.1.3 (2015-09-15) -------------------- - -- Fix pickle tests. - - -v1.1.2 (2015-09-15) -------------------- - -- Fix pickling of large ``LRUCache`` and ``TTLCache`` instances. - - -v1.1.1 (2015-09-07) -------------------- - -- Improve key functions. - -- Improve documentation. - -- Improve unit test coverage. - - -v1.1.0 (2015-08-28) -------------------- - -- Add ``@cached`` function decorator. - -- Add ``hashkey`` and ``typedkey`` fuctions. - -- Add `key` and `lock` arguments to ``@cachedmethod``. - -- Set ``__wrapped__`` attributes for Python versions < 3.2. - -- Move ``functools`` compatible decorators to ``cachetools.func``. - -- Deprecate ``@cachedmethod`` `typed` argument. 
- -- Deprecate `cache` attribute for ``@cachedmethod`` wrappers. - -- Deprecate `getsizeof` and `lock` arguments for `cachetools.func` - decorator. - - -v1.0.3 (2015-06-26) -------------------- - -- Clear cache statistics when calling ``clear_cache()``. - - -v1.0.2 (2015-06-18) -------------------- - -- Allow simple cache instances to be pickled. - -- Refactor ``Cache.getsizeof`` and ``Cache.missing`` default - implementation. - - -v1.0.1 (2015-06-06) -------------------- - -- Code cleanup for improved PEP 8 conformance. - -- Add documentation and unit tests for using ``@cachedmethod`` with - generic mutable mappings. - -- Improve documentation. - - -v1.0.0 (2014-12-19) -------------------- - -- Provide ``RRCache.choice`` property. - -- Improve documentation. - - -v0.8.2 (2014-12-15) -------------------- - -- Use a ``NestedTimer`` for ``TTLCache``. - - -v0.8.1 (2014-12-07) -------------------- - -- Deprecate ``Cache.getsize()``. - - -v0.8.0 (2014-12-03) -------------------- - -- Ignore ``ValueError`` raised on cache insertion in decorators. - -- Add ``Cache.getsize()``. - -- Add ``Cache.__missing__()``. - -- Feature freeze for `v1.0`. - - -v0.7.1 (2014-11-22) -------------------- - -- Fix `MANIFEST.in`. - - -v0.7.0 (2014-11-12) -------------------- - -- Deprecate ``TTLCache.ExpiredError``. - -- Add `choice` argument to ``RRCache`` constructor. - -- Refactor ``LFUCache``, ``LRUCache`` and ``TTLCache``. - -- Use custom ``NullContext`` implementation for unsynchronized - function decorators. - - -v0.6.0 (2014-10-13) -------------------- - -- Raise ``TTLCache.ExpiredError`` for expired ``TTLCache`` items. - -- Support unsynchronized function decorators. - -- Allow ``@cachedmethod.cache()`` to return None - - -v0.5.1 (2014-09-25) -------------------- - -- No formatting of ``KeyError`` arguments. - -- Update ``README.rst``. - - -v0.5.0 (2014-09-23) -------------------- - -- Do not delete expired items in TTLCache.__getitem__(). - -- Add ``@ttl_cache`` function decorator. 
- -- Fix public ``getsizeof()`` usage. - - -v0.4.0 (2014-06-16) -------------------- - -- Add ``TTLCache``. - -- Add ``Cache`` base class. - -- Remove ``@cachedmethod`` `lock` parameter. - - -v0.3.1 (2014-05-07) -------------------- - -- Add proper locking for ``cache_clear()`` and ``cache_info()``. - -- Report `size` in ``cache_info()``. - - -v0.3.0 (2014-05-06) -------------------- - -- Remove ``@cache`` decorator. - -- Add ``size``, ``getsizeof`` members. - -- Add ``@cachedmethod`` decorator. - - -v0.2.0 (2014-04-02) -------------------- - -- Add ``@cache`` decorator. - -- Update documentation. - - -v0.1.0 (2014-03-27) -------------------- - -- Initial release. diff --git a/MANIFEST.in b/MANIFEST.in index de1c916..fd9d6a9 100644 --- a/MANIFEST.in +++ b/MANIFEST.in @@ -1,4 +1,4 @@ -include CHANGES.rst +include CHANGELOG.rst include LICENSE include MANIFEST.in include README.rst diff --git a/README.rst b/README.rst index 96f1634..86d981b 100644 --- a/README.rst +++ b/README.rst @@ -1,8 +1,28 @@ cachetools ======================================================================== +.. image:: http://img.shields.io/pypi/v/cachetools + :target: https://pypi.org/project/cachetools/ + :alt: Latest PyPI version + +.. image:: https://img.shields.io/readthedocs/cachetools + :target: http://cachetools.readthedocs.io/ + :alt: Documentation build status + +.. image:: http://img.shields.io/travis/tkem/cachetools + :target: https://travis-ci.org/tkem/cachetools/ + :alt: Travis CI build status + +.. image:: http://img.shields.io/coveralls/tkem/cachetools + :target: https://coveralls.io/r/tkem/cachetools + :alt: Test coverage + +.. 
image:: https://img.shields.io/github/license/tkem/cachetools + :target: http://raw.github.com/tkem/cachetools/master/LICENSE + :alt: License + This module provides various memoizing collections and decorators, -including variants of the Python 3 Standard Library `@lru_cache`_ +including variants of the Python Standard Library's `@lru_cache`_ function decorator. .. code-block:: python @@ -40,39 +60,22 @@ Multiple cache classes based on different caching algorithms are implemented, and decorators for easily memoizing function and method calls are provided, too. -For more information, please refer to the online documentation_. - Installation ------------------------------------------------------------------------ -Install cachetools using pip:: +cachetools is available from PyPI_ and can be installed by running:: - pip install cachetools + pip install cachetools Project Resources ------------------------------------------------------------------------ -.. image:: http://img.shields.io/pypi/v/cachetools.svg?style=flat - :target: https://pypi.python.org/pypi/cachetools/ - :alt: Latest PyPI version - -.. image:: http://img.shields.io/travis/tkem/cachetools/master.svg?style=flat - :target: https://travis-ci.org/tkem/cachetools/ - :alt: Travis CI build status - -.. image:: http://img.shields.io/coveralls/tkem/cachetools/master.svg?style=flat - :target: https://coveralls.io/r/tkem/cachetools - :alt: Test coverage - -.. image:: https://readthedocs.org/projects/cachetools/badge/?version=latest&style=flat - :target: http://cachetools.readthedocs.io/en/latest/ - :alt: Documentation Status - -- `Issue Tracker`_ -- `Source Code`_ -- `Change Log`_ +- `Documentation`_ +- `Issue tracker`_ +- `Source code`_ +- `Change log`_ License @@ -88,8 +91,9 @@ Licensed under the `MIT License`_. .. _mapping: http://docs.python.org/dev/glossary.html#term-mapping .. _cache algorithm: http://en.wikipedia.org/wiki/Cache_algorithms -.. _Documentation: http://cachetools.readthedocs.io/en/latest/ -.. 
_Issue Tracker: https://github.com/tkem/cachetools/issues/ -.. _Source Code: https://github.com/tkem/cachetools/ -.. _Change Log: https://github.com/tkem/cachetools/blob/master/CHANGES.rst +.. _PyPI: https://pypi.org/project/cachetools/ +.. _Documentation: https://cachetools.readthedocs.io/ +.. _Issue tracker: https://github.com/tkem/cachetools/issues/ +.. _Source code: https://github.com/tkem/cachetools/ +.. _Change log: https://github.com/tkem/cachetools/blob/master/CHANGELOG.rst .. _MIT License: http://raw.github.com/tkem/cachetools/master/LICENSE diff --git a/docs/conf.py b/docs/conf.py index 3cdb9cf..ce08678 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -1,13 +1,16 @@ -def get_version(filename): - from re import findall - with open(filename) as f: - metadata = dict(findall(r"__([a-z]+)__ = '([^']+)'", f.read())) - return metadata['version'] +def get_version(): + import configparser + import pathlib + + cp = configparser.ConfigParser() + # Python 3.5 ConfigParser does not accept Path as filename + cp.read(str(pathlib.Path(__file__).parent.parent / "setup.cfg")) + return cp["metadata"]["version"] project = 'cachetools' copyright = '2014-2019 Thomas Kemmer' -version = get_version(b'../cachetools/__init__.py') +version = get_version() release = version extensions = [ diff --git a/docs/index.rst b/docs/index.rst index a7988c9..257177e 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -1,5 +1,6 @@ +********************************************************************* :mod:`cachetools` --- Extensible memoizing collections and decorators -======================================================================= +********************************************************************* .. module:: cachetools @@ -27,12 +28,12 @@ calls are provided, too. 
import operator from cachetools import cached, cachedmethod, LRUCache - import mock + from unittest import mock urllib = mock.MagicMock() Cache implementations ------------------------------------------------------------------------- +===================== This module provides several classes implementing caches using different cache algorithms. All these classes derive from class @@ -117,7 +118,7 @@ of one argument used to retrieve the size of an item's value. Extending cache classes -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +----------------------- Sometimes it may be desirable to notice when and what cache items are evicted, i.e. removed from a cache to make room for new items. Since @@ -172,7 +173,7 @@ in its own right. Memoizing decorators ------------------------------------------------------------------------- +==================== The :mod:`cachetools` module provides decorators for memoizing function and method calls. This can save time when a function is @@ -361,8 +362,9 @@ often called with the same arguments: RFC #1: ... +***************************************************************** :mod:`cachetools.keys` --- Key functions for memoizing decorators -============================================================================ +***************************************************************** .. module:: cachetools.keys @@ -405,8 +407,9 @@ like this:: @cached(LRUCache(maxsize=128), key=envkey) +**************************************************************************** :mod:`cachetools.func` --- :func:`functools.lru_cache` compatible decorators -============================================================================ +**************************************************************************** .. 
module:: cachetools.func diff --git a/setup.cfg b/setup.cfg index d62e57e..15c566f 100644 --- a/setup.cfg +++ b/setup.cfg @@ -1,13 +1,40 @@ -[flake8] -exclude = .git,.tox +[metadata] +name = cachetools +version = 4.0.0 +url = https://github.com/tkem/cachetools/ +author = Thomas Kemmer +author_email = tkemmer@computer.org +license = MIT +license_file = LICENSE +description = Extensible memoizing collections and decorators +long_description = file: README.rst +classifiers = + Development Status :: 5 - Production/Stable + Environment :: Other Environment + Intended Audience :: Developers + License :: OSI Approved :: MIT License + Operating System :: OS Independent + Programming Language :: Python + Programming Language :: Python :: 3 + Programming Language :: Python :: 3.5 + Programming Language :: Python :: 3.6 + Programming Language :: Python :: 3.7 + Programming Language :: Python :: 3.8 + Topic :: Software Development :: Libraries :: Python Modules + +[options] +packages = find: +python_requires = ~= 3.5 -[wheel] -universal = 1 +[options.packages.find] +exclude = + tests + tests.* + +[flake8] +exclude = .git, .tox [build_sphinx] source-dir = docs/ build-dir = docs/_build all_files = 1 - -[upload_sphinx] -upload-dir = docs/_build/html diff --git a/setup.py b/setup.py index efa87c5..6068493 100644 --- a/setup.py +++ b/setup.py @@ -1,40 +1,3 @@ -from setuptools import find_packages, setup +from setuptools import setup - -def get_version(filename): - from re import findall - with open(filename) as f: - metadata = dict(findall("__([a-z]+)__ = '([^']+)'", f.read())) - return metadata['version'] - - -setup( - name='cachetools', - version=get_version('cachetools/__init__.py'), - url='https://github.com/tkem/cachetools', - license='MIT', - author='Thomas Kemmer', - author_email='tkemmer@computer.org', - description='Extensible memoizing collections and decorators', - long_description=open('README.rst').read(), - keywords='cache caching memoize memoizing memoization LRU LFU 
TTL', - packages=find_packages(exclude=['tests', 'tests.*']), - classifiers=[ - 'Development Status :: 5 - Production/Stable', - 'Environment :: Other Environment', - 'Intended Audience :: Developers', - 'License :: OSI Approved :: MIT License', - 'Operating System :: OS Independent', - 'Programming Language :: Python', - 'Programming Language :: Python :: 2', - 'Programming Language :: Python :: 2.7', - 'Programming Language :: Python :: 3', - 'Programming Language :: Python :: 3.4', - 'Programming Language :: Python :: 3.5', - 'Programming Language :: Python :: 3.6', - 'Programming Language :: Python :: 3.7', - 'Programming Language :: Python :: Implementation :: CPython', - 'Programming Language :: Python :: Implementation :: PyPy', - 'Topic :: Software Development :: Libraries :: Python Modules' - ] -) +setup() diff --git a/tox.ini b/tox.ini index 105ff74..2ae20e1 100644 --- a/tox.ini +++ b/tox.ini @@ -24,7 +24,6 @@ commands = [testenv:doctest] deps = - mock sphinx commands = sphinx-build -W -b doctest -d {envtmpdir}/doctrees docs {envtmpdir}/doctest -- cgit v1.2.3 From fa9d4760dc44cd77f5eafd30cbaeed40a3435c96 Mon Sep 17 00:00:00 2001 From: Thomas Kemmer Date: Sun, 15 Dec 2019 20:53:23 +0100 Subject: Release v4.0.0. --- CHANGELOG.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 0a55f62..c080890 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -1,4 +1,4 @@ -v4.0.0 (UNRELEASED) +v4.0.0 (2019-12-15) =================== - Require Python 3.5 or later. -- cgit v1.2.3 From d33a8bb43e7e995fa9890901a6a32c5cdc1dd9c5 Mon Sep 17 00:00:00 2001 From: Thomas Kemmer Date: Wed, 8 Apr 2020 12:50:56 +0200 Subject: Fix #163: Support user_function with cachetools.func decorators. 
--- cachetools/func.py | 8 ++++++++ docs/index.rst | 21 +++++++++++++++++---- tests/test_func.py | 27 +++++++++++++++++---------- 3 files changed, 42 insertions(+), 14 deletions(-) diff --git a/cachetools/func.py b/cachetools/func.py index 581877b..0124a49 100644 --- a/cachetools/func.py +++ b/cachetools/func.py @@ -93,6 +93,8 @@ def lfu_cache(maxsize=128, typed=False): """ if maxsize is None: return _cache(_UnboundCache(), typed) + elif callable(maxsize): + return _cache(LFUCache(128), typed)(maxsize) else: return _cache(LFUCache(maxsize), typed) @@ -105,6 +107,8 @@ def lru_cache(maxsize=128, typed=False): """ if maxsize is None: return _cache(_UnboundCache(), typed) + elif callable(maxsize): + return _cache(LRUCache(128), typed)(maxsize) else: return _cache(LRUCache(maxsize), typed) @@ -117,6 +121,8 @@ def rr_cache(maxsize=128, choice=random.choice, typed=False): """ if maxsize is None: return _cache(_UnboundCache(), typed) + elif callable(maxsize): + return _cache(RRCache(128, choice), typed)(maxsize) else: return _cache(RRCache(maxsize, choice), typed) @@ -128,5 +134,7 @@ def ttl_cache(maxsize=128, ttl=600, timer=time.monotonic, typed=False): """ if maxsize is None: return _cache(_UnboundTTLCache(ttl, timer), typed) + elif callable(maxsize): + return _cache(TTLCache(128, ttl, timer), typed)(maxsize) else: return _cache(TTLCache(maxsize, ttl, timer), typed) diff --git a/docs/index.rst b/docs/index.rst index 257177e..3661426 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -426,31 +426,44 @@ arguments of different types will be cached separately. For example, ``f(3)`` and ``f(3.0)`` will be treated as distinct calls with distinct results. +If a `user_function` is specified instead, it must be a callable. 
+This allows the decorator to be applied directly to a user function, +leaving the `maxsize` at its default value of 128:: + + @cachetools.func.lru_cache + def count_vowels(sentence): + sentence = sentence.casefold() + return sum(sentence.count(vowel) for vowel in 'aeiou') + The wrapped function is instrumented with :func:`cache_info` and :func:`cache_clear` functions to provide information about cache performance and clear the cache. Please see the :func:`functools.lru_cache` documentation for details. Also note that all the decorators in this module are thread-safe by default. -.. decorator:: lfu_cache(maxsize=128, typed=False) +.. decorator:: lfu_cache(user_function) + lfu_cache(maxsize=128, typed=False) Decorator that wraps a function with a memoizing callable that saves up to `maxsize` results based on a Least Frequently Used (LFU) algorithm. -.. decorator:: lru_cache(maxsize=128, typed=False) +.. decorator:: lru_cache(user_function) + lru_cache(maxsize=128, typed=False) Decorator that wraps a function with a memoizing callable that saves up to `maxsize` results based on a Least Recently Used (LRU) algorithm. -.. decorator:: rr_cache(maxsize=128, choice=random.choice, typed=False) +.. decorator:: rr_cache(user_function) + rr_cache(maxsize=128, choice=random.choice, typed=False) Decorator that wraps a function with a memoizing callable that saves up to `maxsize` results based on a Random Replacement (RR) algorithm. -.. decorator:: ttl_cache(maxsize=128, ttl=600, timer=time.monotonic, typed=False) +.. 
decorator:: ttl_cache(user_function) + ttl_cache(maxsize=128, ttl=600, timer=time.monotonic, typed=False) Decorator to wrap a function with a memoizing callable that saves up to `maxsize` results based on a Least Recently Used (LRU) diff --git a/tests/test_func.py b/tests/test_func.py index 473d9c9..31e96e6 100644 --- a/tests/test_func.py +++ b/tests/test_func.py @@ -5,8 +5,8 @@ import cachetools.func class DecoratorTestMixin(object): - def decorator(self, maxsize, typed=False, lock=None): - raise NotImplementedError + def decorator(self, maxsize, **kwargs): + return self.DECORATOR(maxsize, **kwargs) def test_decorator(self): cached = self.decorator(maxsize=2)(lambda n: n) @@ -65,26 +65,33 @@ class DecoratorTestMixin(object): self.assertEqual(cached(1.0), 1.0) self.assertEqual(cached.cache_info(), (2, 2, 2, 2)) + def test_decorator_user_function(self): + cached = self.decorator(lambda n: n) + + self.assertEqual(cached.cache_info(), (0, 0, 128, 0)) + self.assertEqual(cached(1), 1) + self.assertEqual(cached.cache_info(), (0, 1, 128, 1)) + self.assertEqual(cached(1), 1) + self.assertEqual(cached.cache_info(), (1, 1, 128, 1)) + self.assertEqual(cached(1.0), 1.0) + self.assertEqual(cached.cache_info(), (2, 1, 128, 1)) + class LFUDecoratorTest(unittest.TestCase, DecoratorTestMixin): - def decorator(self, maxsize, **kwargs): - return cachetools.func.lfu_cache(maxsize, **kwargs) + DECORATOR = staticmethod(cachetools.func.lfu_cache) class LRUDecoratorTest(unittest.TestCase, DecoratorTestMixin): - def decorator(self, maxsize, **kwargs): - return cachetools.func.lru_cache(maxsize, **kwargs) + DECORATOR = staticmethod(cachetools.func.lru_cache) class RRDecoratorTest(unittest.TestCase, DecoratorTestMixin): - def decorator(self, maxsize, **kwargs): - return cachetools.func.rr_cache(maxsize, **kwargs) + DECORATOR = staticmethod(cachetools.func.rr_cache) class TTLDecoratorTest(unittest.TestCase, DecoratorTestMixin): - def decorator(self, maxsize, **kwargs): - return 
cachetools.func.ttl_cache(maxsize, **kwargs) + DECORATOR = staticmethod(cachetools.func.ttl_cache) -- cgit v1.2.3 From 1cab650af7d816b8506991f9b77c2e27dca62b9c Mon Sep 17 00:00:00 2001 From: Thomas Kemmer Date: Wed, 8 Apr 2020 13:14:35 +0200 Subject: Fix #164: Support cache_parameters() with cachetools.func decorators. --- cachetools/func.py | 40 +++++++++++++++++++++++----------------- docs/index.rst | 8 +++++++- tests/test_func.py | 24 ++++++++++++++++++------ 3 files changed, 48 insertions(+), 24 deletions(-) diff --git a/cachetools/func.py b/cachetools/func.py index 0124a49..5bfc0dd 100644 --- a/cachetools/func.py +++ b/cachetools/func.py @@ -26,7 +26,9 @@ _CacheInfo = collections.namedtuple('CacheInfo', [ class _UnboundCache(dict): - maxsize = None + @property + def maxsize(self): + return None @property def currsize(self): @@ -42,26 +44,14 @@ class _UnboundTTLCache(TTLCache): return None -def _cache(cache, typed=False): +def _cache(cache, typed): + maxsize = cache.maxsize + def decorator(func): key = keys.typedkey if typed else keys.hashkey lock = RLock() stats = [0, 0] - def cache_info(): - with lock: - hits, misses = stats - maxsize = cache.maxsize - currsize = cache.currsize - return _CacheInfo(hits, misses, maxsize, currsize) - - def cache_clear(): - with lock: - try: - cache.clear() - finally: - stats[:] = [0, 0] - def wrapper(*args, **kwargs): k = key(*args, **kwargs) with lock: @@ -78,9 +68,25 @@ def _cache(cache, typed=False): except ValueError: pass # value too large return v - functools.update_wrapper(wrapper, func) + + def cache_info(): + with lock: + hits, misses = stats + maxsize = cache.maxsize + currsize = cache.currsize + return _CacheInfo(hits, misses, maxsize, currsize) + + def cache_clear(): + with lock: + try: + cache.clear() + finally: + stats[:] = [0, 0] + wrapper.cache_info = cache_info wrapper.cache_clear = cache_clear + wrapper.cache_parameters = lambda: {'maxsize': maxsize, 'typed': typed} + functools.update_wrapper(wrapper, func) 
return wrapper return decorator diff --git a/docs/index.rst b/docs/index.rst index 3661426..c54f8de 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -435,12 +435,18 @@ leaving the `maxsize` at its default value of 128:: sentence = sentence.casefold() return sum(sentence.count(vowel) for vowel in 'aeiou') -The wrapped function is instrumented with :func:`cache_info` and +The wrapped function is instrumented with a :func:`cache_parameters` +function that returns a new :class:`dict` showing the values for +`maxsize` and `typed`. This is for information purposes only. +Mutating the values has no effect. + +The wrapped function is also instrumented with :func:`cache_info` and :func:`cache_clear` functions to provide information about cache performance and clear the cache. Please see the :func:`functools.lru_cache` documentation for details. Also note that all the decorators in this module are thread-safe by default. + .. decorator:: lfu_cache(user_function) lfu_cache(maxsize=128, typed=False) diff --git a/tests/test_func.py b/tests/test_func.py index 31e96e6..c194a02 100644 --- a/tests/test_func.py +++ b/tests/test_func.py @@ -10,7 +10,9 @@ class DecoratorTestMixin(object): def test_decorator(self): cached = self.decorator(maxsize=2)(lambda n: n) - + self.assertEqual(cached.cache_parameters(), { + 'maxsize': 2, 'typed': False + }) self.assertEqual(cached.cache_info(), (0, 0, 2, 0)) self.assertEqual(cached(1), 1) self.assertEqual(cached.cache_info(), (0, 1, 2, 1)) @@ -21,7 +23,9 @@ class DecoratorTestMixin(object): def test_decorator_clear(self): cached = self.decorator(maxsize=2)(lambda n: n) - + self.assertEqual(cached.cache_parameters(), { + 'maxsize': 2, 'typed': False + }) self.assertEqual(cached.cache_info(), (0, 0, 2, 0)) self.assertEqual(cached(1), 1) self.assertEqual(cached.cache_info(), (0, 1, 2, 1)) @@ -32,7 +36,9 @@ class DecoratorTestMixin(object): def test_decorator_nocache(self): cached = self.decorator(maxsize=0)(lambda n: n) - + 
self.assertEqual(cached.cache_parameters(), { + 'maxsize': 0, 'typed': False + }) self.assertEqual(cached.cache_info(), (0, 0, 0, 0)) self.assertEqual(cached(1), 1) self.assertEqual(cached.cache_info(), (0, 1, 0, 0)) @@ -43,7 +49,9 @@ class DecoratorTestMixin(object): def test_decorator_unbound(self): cached = self.decorator(maxsize=None)(lambda n: n) - + self.assertEqual(cached.cache_parameters(), { + 'maxsize': None, 'typed': False + }) self.assertEqual(cached.cache_info(), (0, 0, None, 0)) self.assertEqual(cached(1), 1) self.assertEqual(cached.cache_info(), (0, 1, None, 1)) @@ -54,7 +62,9 @@ class DecoratorTestMixin(object): def test_decorator_typed(self): cached = self.decorator(maxsize=2, typed=True)(lambda n: n) - + self.assertEqual(cached.cache_parameters(), { + 'maxsize': 2, 'typed': True + }) self.assertEqual(cached.cache_info(), (0, 0, 2, 0)) self.assertEqual(cached(1), 1) self.assertEqual(cached.cache_info(), (0, 1, 2, 1)) @@ -67,7 +77,9 @@ class DecoratorTestMixin(object): def test_decorator_user_function(self): cached = self.decorator(lambda n: n) - + self.assertEqual(cached.cache_parameters(), { + 'maxsize': 128, 'typed': False + }) self.assertEqual(cached.cache_info(), (0, 0, 128, 0)) self.assertEqual(cached(1), 1) self.assertEqual(cached.cache_info(), (0, 1, 128, 1)) -- cgit v1.2.3 From bb8df8f40556fb1c248b9a2e88aec514cfa6f170 Mon Sep 17 00:00:00 2001 From: Thomas Kemmer Date: Wed, 8 Apr 2020 13:23:29 +0200 Subject: Prepare v4.1.0. --- CHANGELOG.rst | 10 ++++++++++ LICENSE | 2 +- README.rst | 2 +- cachetools/__init__.py | 2 +- docs/conf.py | 2 +- setup.cfg | 2 +- 6 files changed, 15 insertions(+), 5 deletions(-) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index c080890..7f71c3f 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -1,3 +1,13 @@ +v4.1.0 (UNRELEASED) +=================== + +- Support ``user_function`` with ``cachetools.func`` decorators + (Python 3.8 compatibility). 
+ +- Support ``cache_parameters()`` with ``cachetools.func`` decorators + (Python 3.9 compatibility). + + v4.0.0 (2019-12-15) =================== diff --git a/LICENSE b/LICENSE index 7da84f4..0dc1864 100644 --- a/LICENSE +++ b/LICENSE @@ -1,6 +1,6 @@ The MIT License (MIT) -Copyright (c) 2014-2019 Thomas Kemmer +Copyright (c) 2014-2020 Thomas Kemmer Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in diff --git a/README.rst b/README.rst index 86d981b..9482e3a 100644 --- a/README.rst +++ b/README.rst @@ -81,7 +81,7 @@ Project Resources License ------------------------------------------------------------------------ -Copyright (c) 2014-2019 Thomas Kemmer. +Copyright (c) 2014-2020 Thomas Kemmer. Licensed under the `MIT License`_. diff --git a/cachetools/__init__.py b/cachetools/__init__.py index 95ac6e6..c3078bd 100644 --- a/cachetools/__init__.py +++ b/cachetools/__init__.py @@ -17,4 +17,4 @@ __all__ = ( 'cachedmethod' ) -__version__ = '4.0.0' +__version__ = '4.1.0' diff --git a/docs/conf.py b/docs/conf.py index ce08678..92dda3a 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -9,7 +9,7 @@ def get_version(): project = 'cachetools' -copyright = '2014-2019 Thomas Kemmer' +copyright = '2014-2020 Thomas Kemmer' version = get_version() release = version diff --git a/setup.cfg b/setup.cfg index 15c566f..71391c1 100644 --- a/setup.cfg +++ b/setup.cfg @@ -1,6 +1,6 @@ [metadata] name = cachetools -version = 4.0.0 +version = 4.1.0 url = https://github.com/tkem/cachetools/ author = Thomas Kemmer author_email = tkemmer@computer.org -- cgit v1.2.3 From a6258fb26c2b21cf56e84be43807fd5d055480d2 Mon Sep 17 00:00:00 2001 From: Thomas Kemmer Date: Wed, 8 Apr 2020 13:33:09 +0200 Subject: Release v4.1.0. 
--- CHANGELOG.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 7f71c3f..158276d 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -1,4 +1,4 @@ -v4.1.0 (UNRELEASED) +v4.1.0 (2020-04-08) =================== - Support ``user_function`` with ``cachetools.func`` decorators -- cgit v1.2.3 From 85bb810400f1269ad8594810a4227c95c9b78c95 Mon Sep 17 00:00:00 2001 From: Thomas Kemmer Date: Sun, 3 May 2020 21:42:23 +0200 Subject: Add Travis Python 3.9-dev. --- .travis.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.travis.yml b/.travis.yml index c466ad8..e575af6 100644 --- a/.travis.yml +++ b/.travis.yml @@ -5,6 +5,7 @@ python: - 3.6 - 3.7 - 3.8 +- 3.9-dev - pypy3 install: -- cgit v1.2.3 From f9704abaaba7cd4ce807aed3d968fb391107d8c6 Mon Sep 17 00:00:00 2001 From: Thomas Kemmer Date: Sun, 28 Jun 2020 20:17:38 +0200 Subject: Fix #167: Replace float('inf') with math.inf. --- cachetools/func.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/cachetools/func.py b/cachetools/func.py index 5bfc0dd..5baf6de 100644 --- a/cachetools/func.py +++ b/cachetools/func.py @@ -2,6 +2,7 @@ import collections import functools +import math import random import time @@ -37,7 +38,7 @@ class _UnboundCache(dict): class _UnboundTTLCache(TTLCache): def __init__(self, ttl, timer): - TTLCache.__init__(self, float('inf'), ttl, timer) + TTLCache.__init__(self, math.inf, ttl, timer) @property def maxsize(self): -- cgit v1.2.3 From e82cffcfc29acdedf27eba7bee361b0febe7b0a0 Mon Sep 17 00:00:00 2001 From: Thomas Kemmer Date: Sun, 28 Jun 2020 20:33:45 +0200 Subject: Fix #174: Improve popitem() exception context handling. 
--- cachetools/lfu.py | 3 ++- cachetools/lru.py | 3 ++- cachetools/rr.py | 3 ++- cachetools/ttl.py | 3 ++- tests/__init__.py | 16 ++++++++++++++++ 5 files changed, 24 insertions(+), 4 deletions(-) diff --git a/cachetools/lfu.py b/cachetools/lfu.py index 341df01..adb45ee 100644 --- a/cachetools/lfu.py +++ b/cachetools/lfu.py @@ -28,6 +28,7 @@ class LFUCache(Cache): try: (key, _), = self.__counter.most_common(1) except ValueError: - raise KeyError('%s is empty' % self.__class__.__name__) + msg = '%s is empty' % self.__class__.__name__ + raise KeyError(msg) from None else: return (key, self.pop(key)) diff --git a/cachetools/lru.py b/cachetools/lru.py index b72b995..7634f9c 100644 --- a/cachetools/lru.py +++ b/cachetools/lru.py @@ -28,7 +28,8 @@ class LRUCache(Cache): try: key = next(iter(self.__order)) except StopIteration: - raise KeyError('%s is empty' % self.__class__.__name__) + msg = '%s is empty' % self.__class__.__name__ + raise KeyError(msg) from None else: return (key, self.pop(key)) diff --git a/cachetools/rr.py b/cachetools/rr.py index 88ac07c..30f3822 100644 --- a/cachetools/rr.py +++ b/cachetools/rr.py @@ -29,6 +29,7 @@ class RRCache(Cache): try: key = self.__choice(list(self)) except IndexError: - raise KeyError('%s is empty' % self.__class__.__name__) + msg = '%s is empty' % self.__class__.__name__ + raise KeyError(msg) from None else: return (key, self.pop(key)) diff --git a/cachetools/ttl.py b/cachetools/ttl.py index 3bac52e..7822e8b 100644 --- a/cachetools/ttl.py +++ b/cachetools/ttl.py @@ -198,7 +198,8 @@ class TTLCache(Cache): try: key = next(iter(self.__links)) except StopIteration: - raise KeyError('%s is empty' % self.__class__.__name__) + msg = '%s is empty' % self.__class__.__name__ + raise KeyError(msg) from None else: return (key, self.pop(key)) diff --git a/tests/__init__.py b/tests/__init__.py index 0be3f32..82f85ef 100644 --- a/tests/__init__.py +++ b/tests/__init__.py @@ -1,3 +1,7 @@ +import sys +import unittest + + class 
CacheTestMixin(object): Cache = None @@ -104,6 +108,18 @@ class CacheTestMixin(object): with self.assertRaises(KeyError): cache.popitem() + @unittest.skipUnless(sys.version_info >= (3, 7), 'requires Python 3.7') + def test_popitem_exception_context(self): + # since Python 3.7, MutableMapping.popitem() suppresses + # exception context as implementation detail + exception = None + try: + self.Cache(maxsize=2).popitem() + except Exception as e: + exception = e + self.assertIsNone(exception.__cause__) + self.assertTrue(exception.__suppress_context__) + def _test_missing(self, cache): self.assertEqual(0, cache.currsize) self.assertEqual(2, cache.maxsize) -- cgit v1.2.3 From 185058de3b41b8c3031c276085859a6fa73320fa Mon Sep 17 00:00:00 2001 From: Thomas Kemmer Date: Sun, 28 Jun 2020 20:48:46 +0200 Subject: Fix #170: Improve "envkey" documentation example. --- docs/index.rst | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/docs/index.rst b/docs/index.rst index c54f8de..e27c9bc 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -405,6 +405,10 @@ The :func:`envkey` function can then be used in decorator declarations like this:: @cached(LRUCache(maxsize=128), key=envkey) + def foo(x, y, z, env={}): + pass + + foo(1, 2, 3, env=dict(a='a', b='b')) **************************************************************************** -- cgit v1.2.3 From 4c1907e216c753c5b9fb8692df101a651cc566ed Mon Sep 17 00:00:00 2001 From: Thomas Kemmer Date: Sun, 28 Jun 2020 21:16:38 +0200 Subject: Prepare v4.1.1. --- CHANGELOG.rst | 10 ++++++++++ cachetools/__init__.py | 2 +- setup.cfg | 2 +- 3 files changed, 12 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 158276d..1d70540 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -1,3 +1,13 @@ +v4.1.1 (UNRELEASED) +=================== + +- Improve ``popitem()`` exception context handling. + +- Replace ``float('inf')`` with ``math.inf``. + +- Improve "envkey" documentation example. 
+ + v4.1.0 (2020-04-08) =================== diff --git a/cachetools/__init__.py b/cachetools/__init__.py index c3078bd..51d8f7c 100644 --- a/cachetools/__init__.py +++ b/cachetools/__init__.py @@ -17,4 +17,4 @@ __all__ = ( 'cachedmethod' ) -__version__ = '4.1.0' +__version__ = '4.1.1' diff --git a/setup.cfg b/setup.cfg index 71391c1..64f381b 100644 --- a/setup.cfg +++ b/setup.cfg @@ -1,6 +1,6 @@ [metadata] name = cachetools -version = 4.1.0 +version = 4.1.1 url = https://github.com/tkem/cachetools/ author = Thomas Kemmer author_email = tkemmer@computer.org -- cgit v1.2.3 From 31a78b5238a686d12eb11ba2850a96d9d68f49cd Mon Sep 17 00:00:00 2001 From: Thomas Kemmer Date: Sun, 28 Jun 2020 21:27:40 +0200 Subject: Release v4.1.1. --- CHANGELOG.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 1d70540..eb28e16 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -1,4 +1,4 @@ -v4.1.1 (UNRELEASED) +v4.1.1 (2020-06-28) =================== - Improve ``popitem()`` exception context handling. 
-- cgit v1.2.3 From 55d67e6e19fd4e11a7cfe21e517c7e8d5e7ee10d Mon Sep 17 00:00:00 2001 From: Diego Argueta Date: Tue, 28 Jul 2020 11:20:09 -0700 Subject: Add MRU cache implementation --- cachetools/__init__.py | 2 ++ cachetools/func.py | 16 +++++++++++++++- cachetools/mru.py | 38 ++++++++++++++++++++++++++++++++++++++ tests/test_func.py | 5 +++++ tests/test_mru.py | 50 ++++++++++++++++++++++++++++++++++++++++++++++++++ 5 files changed, 110 insertions(+), 1 deletion(-) create mode 100644 cachetools/mru.py create mode 100644 tests/test_mru.py diff --git a/cachetools/__init__.py b/cachetools/__init__.py index 51d8f7c..029b1a6 100644 --- a/cachetools/__init__.py +++ b/cachetools/__init__.py @@ -4,6 +4,7 @@ from .cache import Cache from .decorators import cached, cachedmethod from .lfu import LFUCache from .lru import LRUCache +from .mru import MRUCache from .rr import RRCache from .ttl import TTLCache @@ -11,6 +12,7 @@ __all__ = ( 'Cache', 'LFUCache', 'LRUCache', + 'MRUCache', 'RRCache', 'TTLCache', 'cached', diff --git a/cachetools/func.py b/cachetools/func.py index 5baf6de..07f45f2 100644 --- a/cachetools/func.py +++ b/cachetools/func.py @@ -14,10 +14,11 @@ except ImportError: # pragma: no cover from . import keys from .lfu import LFUCache from .lru import LRUCache +from .mru import MRUCache from .rr import RRCache from .ttl import TTLCache -__all__ = ('lfu_cache', 'lru_cache', 'rr_cache', 'ttl_cache') +__all__ = ('lfu_cache', 'lru_cache', 'mru_cache', 'rr_cache', 'ttl_cache') _CacheInfo = collections.namedtuple('CacheInfo', [ @@ -120,6 +121,19 @@ def lru_cache(maxsize=128, typed=False): return _cache(LRUCache(maxsize), typed) +def mru_cache(maxsize=128, typed=False): + """Decorator to wrap a function with a memoizing callable that saves + up to `maxsize` results based on a Most Recently Used (MRU) + algorithm. 
+ """ + if maxsize is None: + return _cache(_UnboundCache(), typed) + elif callable(maxsize): + return _cache(MRUCache(128), typed)(maxsize) + else: + return _cache(MRUCache(maxsize), typed) + + def rr_cache(maxsize=128, choice=random.choice, typed=False): """Decorator to wrap a function with a memoizing callable that saves up to `maxsize` results based on a Random Replacement (RR) diff --git a/cachetools/mru.py b/cachetools/mru.py new file mode 100644 index 0000000..62cc9c0 --- /dev/null +++ b/cachetools/mru.py @@ -0,0 +1,38 @@ +import collections + +from cachetools.cache import Cache + + +class MRUCache(Cache): + """Most Recently Used (MRU) cache implementation.""" + + def __init__(self, maxsize, getsizeof=None): + super().__init__(maxsize, getsizeof) + self.__order = collections.OrderedDict() + + def __getitem__(self, key): + value = super().__getitem__(key) + self.__update(key) + return value + + def __setitem__(self, key, value): + super().__setitem__(key, value) + self.__update(key) + + def __delitem__(self, key): + super().__delitem__(key) + del self.__order[key] + + def popitem(self): + """Remove and return the `(key, value)` pair most recently used.""" + if not self.__order: + raise KeyError(type(self).__name__ + ' cache is empty') from None + + key = next(iter(self.__order)) + return (key, self.pop(key)) + + def __update(self, key): + try: + self.__order.move_to_end(key, last=False) + except KeyError: + self.__order[key] = None diff --git a/tests/test_func.py b/tests/test_func.py index c194a02..2aebbd0 100644 --- a/tests/test_func.py +++ b/tests/test_func.py @@ -99,6 +99,11 @@ class LRUDecoratorTest(unittest.TestCase, DecoratorTestMixin): DECORATOR = staticmethod(cachetools.func.lru_cache) +class MRUDecoratorTest(unittest.TestCase, DecoratorTestMixin): + + DECORATOR = staticmethod(cachetools.func.mru_cache) + + class RRDecoratorTest(unittest.TestCase, DecoratorTestMixin): DECORATOR = staticmethod(cachetools.func.rr_cache) diff --git a/tests/test_mru.py 
b/tests/test_mru.py new file mode 100644 index 0000000..3a9f4d3 --- /dev/null +++ b/tests/test_mru.py @@ -0,0 +1,50 @@ +import unittest + +from cachetools import MRUCache + +from . import CacheTestMixin + + +class MRUCacheTest(unittest.TestCase, CacheTestMixin): + + Cache = MRUCache + + def test_evict__writes_only(self): + cache = MRUCache(maxsize=2) + + cache[1] = 1 + cache[2] = 2 + cache[3] = 3 # Evicts 1 because nothing's been used yet + + assert len(cache) == 2 + assert 1 not in cache, 'Wrong key was evicted. Should have been `1`.' + assert 2 in cache + assert 3 in cache + + def test_evict__with_access(self): + cache = MRUCache(maxsize=2) + + cache[1] = 1 + cache[2] = 2 + cache[1] + cache[2] + cache[3] = 3 # Evicts 2 + assert 2 not in cache, 'Wrong key was evicted. Should have been `2`.' + assert 1 in cache + assert 3 in cache + + def test_evict__with_delete(self): + cache = MRUCache(maxsize=2) + + cache[1] = 1 + cache[2] = 2 + del cache[2] + cache[3] = 3 # Doesn't evict anything because we just deleted 2 + + assert 2 not in cache + assert 1 in cache + + cache[4] = 4 # Should evict 1 as we just accessed it with __contains__ + assert 1 not in cache + assert 3 in cache + assert 4 in cache -- cgit v1.2.3 From 59ff52c872ba88206abf8256e95e6ccbd8694544 Mon Sep 17 00:00:00 2001 From: Thomas Kemmer Date: Wed, 9 Dec 2020 19:19:02 +0100 Subject: Add MRU documentation. --- docs/index.rst | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/docs/index.rst b/docs/index.rst index e27c9bc..8728c83 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -81,6 +81,12 @@ of one argument used to retrieve the size of an item's value. This class discards the least recently used items first to make space when necessary. +.. autoclass:: MRUCache(maxsize, getsizeof=None) + :members: + + This class discards the most recently used items first to make + space when necessary. + .. 
autoclass:: RRCache(maxsize, choice=random.choice, getsizeof=None) :members: @@ -465,6 +471,13 @@ all the decorators in this module are thread-safe by default. saves up to `maxsize` results based on a Least Recently Used (LRU) algorithm. +.. decorator:: mru_cache(user_function) + mru_cache(maxsize=128, typed=False) + + Decorator that wraps a function with a memoizing callable that + saves up to `maxsize` results based on a Most Recently Used (MRU) + algorithm. + .. decorator:: rr_cache(user_function) rr_cache(maxsize=128, choice=random.choice, typed=False) -- cgit v1.2.3 From 93163ec4abe9bc68b603b3bf48b4cea73b77de79 Mon Sep 17 00:00:00 2001 From: Thomas Kemmer Date: Wed, 9 Dec 2020 21:08:04 +0100 Subject: Fix #173: Document use of key function for accessing cache items. --- docs/index.rst | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) diff --git a/docs/index.rst b/docs/index.rst index 8728c83..b50c035 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -217,7 +217,7 @@ often called with the same arguments: implementing the `context manager`_ protocol. Any access to the cache will then be nested in a ``with lock:`` statement. This can be used for synchronizing thread access to the cache by providing a - :class:`threading.RLock` instance, for example. + :class:`threading.Lock` instance, for example. .. note:: @@ -237,12 +237,13 @@ often called with the same arguments: .. 
testcode:: - from threading import RLock + from cachetools.keys import hashkey + from threading import Lock cache = LRUCache(maxsize=32) - lock = RLock() + lock = Lock() - @cached(cache, lock=lock) + @cached(cache, key=hashkey, lock=lock) def get_pep(num): 'Retrieve text of a Python Enhancement Proposal' url = 'http://www.python.org/dev/peps/pep-%04d/' % num @@ -253,6 +254,10 @@ often called with the same arguments: with lock: cache.clear() + # always use the key function for accessing cache items + with lock: + cache.pop(hashkey(42), None) + It is also possible to use a single shared cache object with multiple functions. However, care must be taken that different cache keys are generated for each function, even for identical @@ -284,6 +289,7 @@ often called with the same arguments: >>> list(sorted(numcache.items())) [..., (('fib', 42), 267914296), ..., (('luc', 42), 599074578)] + .. decorator:: cachedmethod(cache, key=cachetools.keys.hashkey, lock=None) Decorator to wrap a class or instance method with a memoizing -- cgit v1.2.3 From 26afdc4b6c50e56801df55ba47f487d228cb425d Mon Sep 17 00:00:00 2001 From: Thomas Kemmer Date: Wed, 9 Dec 2020 21:14:01 +0100 Subject: Fix #184: Add documentation regarding mutability of cached values. --- docs/index.rst | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/docs/index.rst b/docs/index.rst index b50c035..08374ed 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -52,6 +52,13 @@ making the cache's size equal to the number of its items, or named constructor parameter `getsizeof`, which may specify a function of one argument used to retrieve the size of an item's value. +Note that the values of a :class:`Cache` are mutable by default, as +are e.g. the values of a :class:`dict`. It is the user's +responsibility to take care that cached values are not accidentally +modified. 
This is especially important when using a custom +`getsizeof` function, since the size of an item's value will only be +computed when the item is inserted into the cache. + .. note:: Please be aware that all these classes are *not* thread-safe. -- cgit v1.2.3 From 992c42327b993a96853cfed8dd9a0b196a0c18c4 Mon Sep 17 00:00:00 2001 From: Thomas Kemmer Date: Tue, 8 Sep 2020 09:52:27 +0200 Subject: Remove DefaultMapping abstract base class. --- cachetools/abc.py | 46 ---------------------------------------------- cachetools/cache.py | 32 ++++++++++++++++++++++++++++++-- 2 files changed, 30 insertions(+), 48 deletions(-) delete mode 100644 cachetools/abc.py diff --git a/cachetools/abc.py b/cachetools/abc.py deleted file mode 100644 index b61e49b..0000000 --- a/cachetools/abc.py +++ /dev/null @@ -1,46 +0,0 @@ -from abc import abstractmethod -from collections.abc import MutableMapping - - -class DefaultMapping(MutableMapping): - - __slots__ = () - - @abstractmethod - def __contains__(self, key): # pragma: nocover - return False - - @abstractmethod - def __getitem__(self, key): # pragma: nocover - if hasattr(self.__class__, '__missing__'): - return self.__class__.__missing__(self, key) - else: - raise KeyError(key) - - def get(self, key, default=None): - if key in self: - return self[key] - else: - return default - - __marker = object() - - def pop(self, key, default=__marker): - if key in self: - value = self[key] - del self[key] - elif default is self.__marker: - raise KeyError(key) - else: - value = default - return value - - def setdefault(self, key, default=None): - if key in self: - value = self[key] - else: - self[key] = value = default - return value - - -DefaultMapping.register(dict) diff --git a/cachetools/cache.py b/cachetools/cache.py index 4354ca6..ed3d268 100644 --- a/cachetools/cache.py +++ b/cachetools/cache.py @@ -1,7 +1,10 @@ -from .abc import DefaultMapping +from collections.abc import MutableMapping class _DefaultSize(object): + + __slots__ = () + def 
__getitem__(self, _): return 1 @@ -12,9 +15,11 @@ class _DefaultSize(object): return 1 -class Cache(DefaultMapping): +class Cache(MutableMapping): """Mutable mapping to serve as a simple cache or cache base class.""" + __marker = object() + __size = _DefaultSize() def __init__(self, maxsize, getsizeof=None): @@ -73,6 +78,29 @@ class Cache(DefaultMapping): def __len__(self): return len(self.__data) + def get(self, key, default=None): + if key in self: + return self[key] + else: + return default + + def pop(self, key, default=__marker): + if key in self: + value = self[key] + del self[key] + elif default is self.__marker: + raise KeyError(key) + else: + value = default + return value + + def setdefault(self, key, default=None): + if key in self: + value = self[key] + else: + self[key] = value = default + return value + @property def maxsize(self): """The maximum size of the cache.""" -- cgit v1.2.3 From c9958d9de98458d9a95e441d22b92ee608dbd552 Mon Sep 17 00:00:00 2001 From: Thomas Kemmer Date: Wed, 9 Dec 2020 21:23:50 +0100 Subject: Officially support Python 3.9. --- .travis.yml | 2 +- setup.cfg | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/.travis.yml b/.travis.yml index e575af6..06d1552 100644 --- a/.travis.yml +++ b/.travis.yml @@ -5,7 +5,7 @@ python: - 3.6 - 3.7 - 3.8 -- 3.9-dev +- 3.9 - pypy3 install: diff --git a/setup.cfg b/setup.cfg index 64f381b..d0bb33a 100644 --- a/setup.cfg +++ b/setup.cfg @@ -20,6 +20,7 @@ classifiers = Programming Language :: Python :: 3.6 Programming Language :: Python :: 3.7 Programming Language :: Python :: 3.8 + Programming Language :: Python :: 3.9 Topic :: Software Development :: Libraries :: Python Modules [options] -- cgit v1.2.3 From e07d8ecb3b346ae90940e531d9ddc1565da72cd2 Mon Sep 17 00:00:00 2001 From: Thomas Kemmer Date: Wed, 9 Dec 2020 22:01:33 +0100 Subject: Homogenize cache implementations. 
--- cachetools/lfu.py | 3 +-- cachetools/lru.py | 3 +-- cachetools/mru.py | 25 +++++++++++++------------ cachetools/rr.py | 3 +-- cachetools/ttl.py | 3 +-- 5 files changed, 17 insertions(+), 20 deletions(-) diff --git a/cachetools/lfu.py b/cachetools/lfu.py index adb45ee..17c2b9d 100644 --- a/cachetools/lfu.py +++ b/cachetools/lfu.py @@ -28,7 +28,6 @@ class LFUCache(Cache): try: (key, _), = self.__counter.most_common(1) except ValueError: - msg = '%s is empty' % self.__class__.__name__ - raise KeyError(msg) from None + raise KeyError('%s is empty' % type(self).__name__) from None else: return (key, self.pop(key)) diff --git a/cachetools/lru.py b/cachetools/lru.py index 7634f9c..2508e5d 100644 --- a/cachetools/lru.py +++ b/cachetools/lru.py @@ -28,8 +28,7 @@ class LRUCache(Cache): try: key = next(iter(self.__order)) except StopIteration: - msg = '%s is empty' % self.__class__.__name__ - raise KeyError(msg) from None + raise KeyError('%s is empty' % type(self).__name__) from None else: return (key, self.pop(key)) diff --git a/cachetools/mru.py b/cachetools/mru.py index 62cc9c0..92b8e76 100644 --- a/cachetools/mru.py +++ b/cachetools/mru.py @@ -7,29 +7,30 @@ class MRUCache(Cache): """Most Recently Used (MRU) cache implementation.""" def __init__(self, maxsize, getsizeof=None): - super().__init__(maxsize, getsizeof) + Cache.__init__(self, maxsize, getsizeof) self.__order = collections.OrderedDict() - def __getitem__(self, key): - value = super().__getitem__(key) + def __getitem__(self, key, cache_getitem=Cache.__getitem__): + value = cache_getitem(self, key) self.__update(key) return value - def __setitem__(self, key, value): - super().__setitem__(key, value) + def __setitem__(self, key, value, cache_setitem=Cache.__setitem__): + cache_setitem(self, key, value) self.__update(key) - def __delitem__(self, key): - super().__delitem__(key) + def __delitem__(self, key, cache_delitem=Cache.__delitem__): + cache_delitem(self, key) del self.__order[key] def popitem(self): 
"""Remove and return the `(key, value)` pair most recently used.""" - if not self.__order: - raise KeyError(type(self).__name__ + ' cache is empty') from None - - key = next(iter(self.__order)) - return (key, self.pop(key)) + try: + key = next(iter(self.__order)) + except StopIteration: + raise KeyError('%s is empty' % type(self).__name__) from None + else: + return (key, self.pop(key)) def __update(self, key): try: diff --git a/cachetools/rr.py b/cachetools/rr.py index 30f3822..5b47e87 100644 --- a/cachetools/rr.py +++ b/cachetools/rr.py @@ -29,7 +29,6 @@ class RRCache(Cache): try: key = self.__choice(list(self)) except IndexError: - msg = '%s is empty' % self.__class__.__name__ - raise KeyError(msg) from None + raise KeyError('%s is empty' % type(self).__name__) from None else: return (key, self.pop(key)) diff --git a/cachetools/ttl.py b/cachetools/ttl.py index 7822e8b..528a085 100644 --- a/cachetools/ttl.py +++ b/cachetools/ttl.py @@ -198,8 +198,7 @@ class TTLCache(Cache): try: key = next(iter(self.__links)) except StopIteration: - msg = '%s is empty' % self.__class__.__name__ - raise KeyError(msg) from None + raise KeyError('%s is empty' % type(self).__name__) from None else: return (key, self.pop(key)) -- cgit v1.2.3 From ca648b68fc40fe2a9eef8b469d0d130a03611f40 Mon Sep 17 00:00:00 2001 From: Thomas Kemmer Date: Wed, 9 Dec 2020 22:25:31 +0100 Subject: Fix #188: In case of a race, prefer the item already in the cache. 
--- cachetools/decorators.py | 12 ++++++------ cachetools/func.py | 6 +++--- 2 files changed, 9 insertions(+), 9 deletions(-) diff --git a/cachetools/decorators.py b/cachetools/decorators.py index cbea9fc..217b9a8 100644 --- a/cachetools/decorators.py +++ b/cachetools/decorators.py @@ -34,12 +34,12 @@ def cached(cache, key=hashkey, lock=None): except KeyError: pass # key not found v = func(*args, **kwargs) + # in case of a race, prefer the item already in the cache try: with lock: - cache[k] = v + return cache.setdefault(k, v) except ValueError: - pass # value too large - return v + return v # value too large return functools.update_wrapper(wrapper, func) return decorator @@ -78,11 +78,11 @@ def cachedmethod(cache, key=hashkey, lock=None): except KeyError: pass # key not found v = method(self, *args, **kwargs) + # in case of a race, prefer the item already in the cache try: with lock(self): - c[k] = v + return c.setdefault(k, v) except ValueError: - pass # value too large - return v + return v # value too large return functools.update_wrapper(wrapper, method) return decorator diff --git a/cachetools/func.py b/cachetools/func.py index 07f45f2..0815bac 100644 --- a/cachetools/func.py +++ b/cachetools/func.py @@ -64,12 +64,12 @@ def _cache(cache, typed): except KeyError: stats[1] += 1 v = func(*args, **kwargs) + # in case of a race, prefer the item already in the cache try: with lock: - cache[k] = v + return cache.setdefault(k, v) except ValueError: - pass # value too large - return v + return v # value too large def cache_info(): with lock: -- cgit v1.2.3 From 986d815af6d8ed7c9f93404036dcee58ebf67765 Mon Sep 17 00:00:00 2001 From: Thomas Kemmer Date: Wed, 9 Dec 2020 21:47:03 +0100 Subject: Add FIFO cache implementation. 
--- cachetools/__init__.py | 2 ++ cachetools/fifo.py | 32 ++++++++++++++++++++++++++++ cachetools/func.py | 15 +++++++++++++ docs/index.rst | 13 ++++++++++++ tests/test_fifo.py | 57 ++++++++++++++++++++++++++++++++++++++++++++++++++ tests/test_func.py | 5 +++++ 6 files changed, 124 insertions(+) create mode 100644 cachetools/fifo.py create mode 100644 tests/test_fifo.py diff --git a/cachetools/__init__.py b/cachetools/__init__.py index 029b1a6..428ffc2 100644 --- a/cachetools/__init__.py +++ b/cachetools/__init__.py @@ -2,6 +2,7 @@ from .cache import Cache from .decorators import cached, cachedmethod +from .fifo import FIFOCache from .lfu import LFUCache from .lru import LRUCache from .mru import MRUCache @@ -10,6 +11,7 @@ from .ttl import TTLCache __all__ = ( 'Cache', + 'FIFOCache', 'LFUCache', 'LRUCache', 'MRUCache', diff --git a/cachetools/fifo.py b/cachetools/fifo.py new file mode 100644 index 0000000..38ddca1 --- /dev/null +++ b/cachetools/fifo.py @@ -0,0 +1,32 @@ +import collections + +from .cache import Cache + + +class FIFOCache(Cache): + """First In First Out (FIFO) cache implementation.""" + + def __init__(self, maxsize, getsizeof=None): + Cache.__init__(self, maxsize, getsizeof) + self.__order = collections.OrderedDict() + + def __setitem__(self, key, value, cache_setitem=Cache.__setitem__): + cache_setitem(self, key, value) + try: + self.__order.move_to_end(key) + except KeyError: + self.__order[key] = None + + def __delitem__(self, key, cache_delitem=Cache.__delitem__): + cache_delitem(self, key) + del self.__order[key] + + def popitem(self): + """Remove and return the `(key, value)` pair first inserted.""" + try: + key = next(iter(self.__order)) + except StopIteration: + msg = '%s is empty' % self.__class__.__name__ + raise KeyError(msg) from None + else: + return (key, self.pop(key)) diff --git a/cachetools/func.py b/cachetools/func.py index 0815bac..2be517e 100644 --- a/cachetools/func.py +++ b/cachetools/func.py @@ -12,6 +12,7 @@ except 
ImportError: # pragma: no cover from dummy_threading import RLock from . import keys +from .fifo import FIFOCache from .lfu import LFUCache from .lru import LRUCache from .mru import MRUCache @@ -93,6 +94,20 @@ def _cache(cache, typed): return decorator +def fifo_cache(maxsize=128, typed=False): + """Decorator to wrap a function with a memoizing callable that saves + up to `maxsize` results based on a First In First Out (FIFO) + algorithm. + + """ + if maxsize is None: + return _cache(_UnboundCache(), typed) + elif callable(maxsize): + return _cache(FIFOCache(128), typed)(maxsize) + else: + return _cache(FIFOCache(maxsize), typed) + + def lfu_cache(maxsize=128, typed=False): """Decorator to wrap a function with a memoizing callable that saves up to `maxsize` results based on a Least Frequently Used (LFU) diff --git a/docs/index.rst b/docs/index.rst index 08374ed..945bd78 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -76,6 +76,12 @@ computed when the item is inserted into the cache. additionally need to override :meth:`__getitem__`, :meth:`__setitem__` and :meth:`__delitem__`. +.. autoclass:: FIFOCache(maxsize, getsizeof=None) + :members: + + This class evicts items in the order they were added to make space + when necessary. + .. autoclass:: LFUCache(maxsize, getsizeof=None) :members: @@ -470,6 +476,13 @@ performance and clear the cache. Please see the all the decorators in this module are thread-safe by default. +.. decorator:: fifo_cache(user_function) + fifo_cache(maxsize=128, typed=False) + + Decorator that wraps a function with a memoizing callable that + saves up to `maxsize` results based on a First In First Out + (FIFO) algorithm. + .. decorator:: lfu_cache(user_function) lfu_cache(maxsize=128, typed=False) diff --git a/tests/test_fifo.py b/tests/test_fifo.py new file mode 100644 index 0000000..933af56 --- /dev/null +++ b/tests/test_fifo.py @@ -0,0 +1,57 @@ +import unittest + +from cachetools import FIFOCache + +from . 
import CacheTestMixin + + +class LRUCacheTest(unittest.TestCase, CacheTestMixin): + + Cache = FIFOCache + + def test_fifo(self): + cache = FIFOCache(maxsize=2) + + cache[1] = 1 + cache[2] = 2 + cache[3] = 3 + + self.assertEqual(len(cache), 2) + self.assertEqual(cache[2], 2) + self.assertEqual(cache[3], 3) + self.assertNotIn(1, cache) + + cache[2] + cache[4] = 4 + self.assertEqual(len(cache), 2) + self.assertEqual(cache[3], 3) + self.assertEqual(cache[4], 4) + self.assertNotIn(2, cache) + + cache[5] = 5 + self.assertEqual(len(cache), 2) + self.assertEqual(cache[4], 4) + self.assertEqual(cache[5], 5) + self.assertNotIn(3, cache) + + def test_fifo_getsizeof(self): + cache = FIFOCache(maxsize=3, getsizeof=lambda x: x) + + cache[1] = 1 + cache[2] = 2 + + self.assertEqual(len(cache), 2) + self.assertEqual(cache[1], 1) + self.assertEqual(cache[2], 2) + + cache[3] = 3 + + self.assertEqual(len(cache), 1) + self.assertEqual(cache[3], 3) + self.assertNotIn(1, cache) + self.assertNotIn(2, cache) + + with self.assertRaises(ValueError): + cache[4] = 4 + self.assertEqual(len(cache), 1) + self.assertEqual(cache[3], 3) diff --git a/tests/test_func.py b/tests/test_func.py index 2aebbd0..b6c4fe1 100644 --- a/tests/test_func.py +++ b/tests/test_func.py @@ -89,6 +89,11 @@ class DecoratorTestMixin(object): self.assertEqual(cached.cache_info(), (2, 1, 128, 1)) +class FIFODecoratorTest(unittest.TestCase, DecoratorTestMixin): + + DECORATOR = staticmethod(cachetools.func.fifo_cache) + + class LFUDecoratorTest(unittest.TestCase, DecoratorTestMixin): DECORATOR = staticmethod(cachetools.func.lfu_cache) -- cgit v1.2.3 From 6d2692fcb697984fdae53ee38fb3dd61fb3dc27d Mon Sep 17 00:00:00 2001 From: Thomas Kemmer Date: Wed, 9 Dec 2020 22:32:23 +0100 Subject: Prepare v4.2.0. 
--- CHANGELOG.rst | 15 +++++++++++++++ cachetools/fifo.py | 3 +-- 2 files changed, 16 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index eb28e16..f93956a 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -1,3 +1,18 @@ +v4.2.0 (UNRELEASED) +=================== + +- Add FIFO cache implementation. + +- Add MRU cache implementation. + +- Improve behavior of decorators in case of race conditions. + +- Improve documentation regarding mutability of caches values and use + of key functions with decorators. + +- Officially support Python 3.9. + + v4.1.1 (2020-06-28) =================== diff --git a/cachetools/fifo.py b/cachetools/fifo.py index 38ddca1..9f254f1 100644 --- a/cachetools/fifo.py +++ b/cachetools/fifo.py @@ -26,7 +26,6 @@ class FIFOCache(Cache): try: key = next(iter(self.__order)) except StopIteration: - msg = '%s is empty' % self.__class__.__name__ - raise KeyError(msg) from None + raise KeyError('%s is empty' % type(self).__name__) from None else: return (key, self.pop(key)) -- cgit v1.2.3 From 5085c57624065759ea66911e52273f0fea918ac5 Mon Sep 17 00:00:00 2001 From: Paul Fisher Date: Sat, 12 Sep 2020 02:13:24 -0400 Subject: Add a test demonstrating the need for RLock in func.py. --- tests/test_func.py | 23 +++++++++++++++++++++++ 1 file changed, 23 insertions(+) diff --git a/tests/test_func.py b/tests/test_func.py index b6c4fe1..39dce77 100644 --- a/tests/test_func.py +++ b/tests/test_func.py @@ -88,6 +88,29 @@ class DecoratorTestMixin(object): self.assertEqual(cached(1.0), 1.0) self.assertEqual(cached.cache_info(), (2, 1, 128, 1)) + def test_decorator_needs_rlock(self): + cached = self.decorator(lambda n: n) + + class RecursiveEquals: + def __init__(self, use_cache): + self._use_cache = use_cache + + def __hash__(self): + return hash(self._use_cache) + + def __eq__(self, other): + if self._use_cache: + # This call will happen while the cache-lock is held, + # requiring a reentrant lock to avoid deadlock. 
+ cached(self) + return self._use_cache == other._use_cache + + # Prime the cache. + cached(RecursiveEquals(False)) + cached(RecursiveEquals(True)) + # Then do a call which will cause a deadlock with a non-reentrant lock. + self.assertEqual(cached(RecursiveEquals(True)), RecursiveEquals(True)) + class FIFODecoratorTest(unittest.TestCase, DecoratorTestMixin): -- cgit v1.2.3 From c4e71eccd7d27e26df166a434d7f9004b224ea81 Mon Sep 17 00:00:00 2001 From: Thomas Kemmer Date: Wed, 9 Dec 2020 23:44:49 +0100 Subject: Add Libraries.io SourceRank shield and link. --- README.rst | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/README.rst b/README.rst index 9482e3a..6126d49 100644 --- a/README.rst +++ b/README.rst @@ -17,6 +17,10 @@ cachetools :target: https://coveralls.io/r/tkem/cachetools :alt: Test coverage +.. image:: https://img.shields.io/librariesio/sourcerank/pypi/cachetools + :target: https://libraries.io/pypi/cachetools + :alt: Libraries.io SourceRank + .. image:: https://img.shields.io/github/license/tkem/cachetools :target: http://raw.github.com/tkem/cachetools/master/LICENSE :alt: License -- cgit v1.2.3 From e6acc1eaff5593158933d24ca035d746d5a13899 Mon Sep 17 00:00:00 2001 From: Thomas Kemmer Date: Thu, 10 Dec 2020 21:25:58 +0100 Subject: Release v4.2.0. --- CHANGELOG.rst | 2 +- cachetools/__init__.py | 2 +- setup.cfg | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index f93956a..6ebd203 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -1,4 +1,4 @@ -v4.2.0 (UNRELEASED) +v4.2.0 (2020-12-10) =================== - Add FIFO cache implementation. 
diff --git a/cachetools/__init__.py b/cachetools/__init__.py index 428ffc2..4be1ebe 100644 --- a/cachetools/__init__.py +++ b/cachetools/__init__.py @@ -21,4 +21,4 @@ __all__ = ( 'cachedmethod' ) -__version__ = '4.1.1' +__version__ = '4.2.0' diff --git a/setup.cfg b/setup.cfg index d0bb33a..e7af1ee 100644 --- a/setup.cfg +++ b/setup.cfg @@ -1,6 +1,6 @@ [metadata] name = cachetools -version = 4.1.1 +version = 4.2.0 url = https://github.com/tkem/cachetools/ author = Thomas Kemmer author_email = tkemmer@computer.org -- cgit v1.2.3 From 4862ce336ca177dd79bafea67a05302c22102532 Mon Sep 17 00:00:00 2001 From: Thomas Kemmer Date: Sun, 24 Jan 2021 20:31:40 +0100 Subject: Fix #200: Handle __missing__ not storing cache items. --- cachetools/lfu.py | 3 ++- cachetools/lru.py | 3 ++- cachetools/mru.py | 3 ++- tests/__init__.py | 49 ++++++++++++++++++++++++++++++++----------------- 4 files changed, 38 insertions(+), 20 deletions(-) diff --git a/cachetools/lfu.py b/cachetools/lfu.py index 17c2b9d..894a326 100644 --- a/cachetools/lfu.py +++ b/cachetools/lfu.py @@ -12,7 +12,8 @@ class LFUCache(Cache): def __getitem__(self, key, cache_getitem=Cache.__getitem__): value = cache_getitem(self, key) - self.__counter[key] -= 1 + if key in self: # __missing__ may not store item + self.__counter[key] -= 1 return value def __setitem__(self, key, value, cache_setitem=Cache.__setitem__): diff --git a/cachetools/lru.py b/cachetools/lru.py index 2508e5d..33749d1 100644 --- a/cachetools/lru.py +++ b/cachetools/lru.py @@ -12,7 +12,8 @@ class LRUCache(Cache): def __getitem__(self, key, cache_getitem=Cache.__getitem__): value = cache_getitem(self, key) - self.__update(key) + if key in self: # __missing__ may not store item + self.__update(key) return value def __setitem__(self, key, value, cache_setitem=Cache.__setitem__): diff --git a/cachetools/mru.py b/cachetools/mru.py index 92b8e76..41e2aa1 100644 --- a/cachetools/mru.py +++ b/cachetools/mru.py @@ -12,7 +12,8 @@ class MRUCache(Cache): def 
__getitem__(self, key, cache_getitem=Cache.__getitem__): value = cache_getitem(self, key) - self.__update(key) + if key in self: # __missing__ may not store item + self.__update(key) return value def __setitem__(self, key, value, cache_setitem=Cache.__setitem__): diff --git a/tests/__init__.py b/tests/__init__.py index 82f85ef..70a0f03 100644 --- a/tests/__init__.py +++ b/tests/__init__.py @@ -120,7 +120,14 @@ class CacheTestMixin(object): self.assertIsNone(exception.__cause__) self.assertTrue(exception.__suppress_context__) - def _test_missing(self, cache): + def test_missing(self): + class DefaultCache(self.Cache): + def __missing__(self, key): + self[key] = key + return key + + cache = DefaultCache(maxsize=2) + self.assertEqual(0, cache.currsize) self.assertEqual(2, cache.maxsize) self.assertEqual(0, len(cache)) @@ -172,30 +179,38 @@ class CacheTestMixin(object): self.assertTrue(1 in cache or 2 in cache) self.assertTrue(1 not in cache or 2 not in cache) - def _test_missing_getsizeof(self, cache): + def test_missing_getsizeof(self): + class DefaultCache(self.Cache): + def __missing__(self, key): + try: + self[key] = key + except ValueError: + pass # not stored + return key + + cache = DefaultCache(maxsize=2, getsizeof=lambda x: x) + self.assertEqual(0, cache.currsize) self.assertEqual(2, cache.maxsize) + self.assertEqual(1, cache[1]) + self.assertEqual(1, len(cache)) + self.assertEqual(1, cache.currsize) self.assertIn(1, cache) + self.assertEqual(2, cache[2]) + self.assertEqual(1, len(cache)) + self.assertEqual(2, cache.currsize) self.assertNotIn(1, cache) self.assertIn(2, cache) - self.assertEqual(3, cache[3]) - self.assertNotIn(1, cache) - self.assertIn(2, cache) - self.assertNotIn(3, cache) - def test_missing_subclass(self): - class Cache(self.Cache): - def __missing__(self, key): - try: - self[key] = key - except ValueError: - pass - return key - - self._test_missing(Cache(maxsize=2)) - self._test_missing_getsizeof(Cache(maxsize=2, getsizeof=lambda x: x)) + 
self.assertEqual(3, cache[3]) # not stored + self.assertEqual(1, len(cache)) + self.assertEqual(2, cache.currsize) + self.assertEqual(1, cache[1]) + self.assertEqual(1, len(cache)) + self.assertEqual(1, cache.currsize) + self.assertEqual((1, 1), cache.popitem()) def _test_getsizeof(self, cache): self.assertEqual(0, cache.currsize) -- cgit v1.2.3 From 2a2e715c31aeb7ba45580937406ea9d4164067d1 Mon Sep 17 00:00:00 2001 From: Thomas Kemmer Date: Sun, 24 Jan 2021 20:35:07 +0100 Subject: Fix #200: Clean up __missing__ example. --- docs/index.rst | 11 ++++------- 1 file changed, 4 insertions(+), 7 deletions(-) diff --git a/docs/index.rst b/docs/index.rst index 945bd78..1c4d05b 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -171,13 +171,10 @@ key is not found: ... def __missing__(self, key): ... """Retrieve text of a Python Enhancement Proposal""" ... url = 'http://www.python.org/dev/peps/pep-%04d/' % key - ... try: - ... with urllib.request.urlopen(url) as s: - ... pep = s.read() - ... self[key] = pep # store text in cache - ... return pep - ... except urllib.error.HTTPError: - ... return 'Not Found' # do not store in cache + ... with urllib.request.urlopen(url) as s: + ... pep = s.read() + ... self[key] = pep # store text in cache + ... return pep >>> peps = PepStore(maxsize=4) >>> for n in 8, 9, 290, 308, 320, 8, 218, 320, 279, 289, 320: -- cgit v1.2.3 From 1d99e165b591508edf9ad00a66c8ed9c4f6edc9f Mon Sep 17 00:00:00 2001 From: Thomas Kemmer Date: Sun, 24 Jan 2021 20:49:22 +0100 Subject: Prepare v4.2.1. --- CHANGELOG.rst | 8 ++++++++ README.rst | 2 +- cachetools/__init__.py | 2 +- docs/conf.py | 2 +- setup.cfg | 2 +- 5 files changed, 12 insertions(+), 4 deletions(-) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 6ebd203..ef1a8a1 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -1,3 +1,11 @@ +v4.2.1 (UNRELEASED) +=================== + +- Handle ``__missing__()`` not storing cache items. + +- Clean up ``__missing__()`` example. 
+ + v4.2.0 (2020-12-10) =================== diff --git a/README.rst b/README.rst index 6126d49..9fd8c36 100644 --- a/README.rst +++ b/README.rst @@ -85,7 +85,7 @@ Project Resources License ------------------------------------------------------------------------ -Copyright (c) 2014-2020 Thomas Kemmer. +Copyright (c) 2014-2021 Thomas Kemmer. Licensed under the `MIT License`_. diff --git a/cachetools/__init__.py b/cachetools/__init__.py index 4be1ebe..2e66e20 100644 --- a/cachetools/__init__.py +++ b/cachetools/__init__.py @@ -21,4 +21,4 @@ __all__ = ( 'cachedmethod' ) -__version__ = '4.2.0' +__version__ = '4.2.1' diff --git a/docs/conf.py b/docs/conf.py index 92dda3a..9efbd40 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -9,7 +9,7 @@ def get_version(): project = 'cachetools' -copyright = '2014-2020 Thomas Kemmer' +copyright = '2014-2021 Thomas Kemmer' version = get_version() release = version diff --git a/setup.cfg b/setup.cfg index e7af1ee..5e40883 100644 --- a/setup.cfg +++ b/setup.cfg @@ -1,6 +1,6 @@ [metadata] name = cachetools -version = 4.2.0 +version = 4.2.1 url = https://github.com/tkem/cachetools/ author = Thomas Kemmer author_email = tkemmer@computer.org -- cgit v1.2.3 From 16c77042c5734fa465788a386be84fc63cb68e71 Mon Sep 17 00:00:00 2001 From: Thomas Kemmer Date: Sun, 24 Jan 2021 21:35:22 +0100 Subject: Release v4.2.1. --- CHANGELOG.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index ef1a8a1..4bfad6b 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -1,4 +1,4 @@ -v4.2.1 (UNRELEASED) +v4.2.1 (2021-01-24) =================== - Handle ``__missing__()`` not storing cache items. -- cgit v1.2.3 From 1311be69d9e68199cc800123f7bc5c610dd720d6 Mon Sep 17 00:00:00 2001 From: Thomas Kemmer Date: Mon, 8 Mar 2021 22:27:57 +0100 Subject: Use https URIs only. 
--- README.rst | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/README.rst b/README.rst index 9fd8c36..732e26f 100644 --- a/README.rst +++ b/README.rst @@ -1,19 +1,19 @@ cachetools ======================================================================== -.. image:: http://img.shields.io/pypi/v/cachetools +.. image:: https://img.shields.io/pypi/v/cachetools :target: https://pypi.org/project/cachetools/ :alt: Latest PyPI version .. image:: https://img.shields.io/readthedocs/cachetools - :target: http://cachetools.readthedocs.io/ + :target: https://cachetools.readthedocs.io/ :alt: Documentation build status -.. image:: http://img.shields.io/travis/tkem/cachetools +.. image:: https://img.shields.io/travis/tkem/cachetools :target: https://travis-ci.org/tkem/cachetools/ :alt: Travis CI build status -.. image:: http://img.shields.io/coveralls/tkem/cachetools +.. image:: https://img.shields.io/coveralls/tkem/cachetools :target: https://coveralls.io/r/tkem/cachetools :alt: Test coverage @@ -22,7 +22,7 @@ cachetools :alt: Libraries.io SourceRank .. image:: https://img.shields.io/github/license/tkem/cachetools - :target: http://raw.github.com/tkem/cachetools/master/LICENSE + :target: https://raw.github.com/tkem/cachetools/master/LICENSE :alt: License This module provides various memoizing collections and decorators, @@ -90,14 +90,14 @@ Copyright (c) 2014-2021 Thomas Kemmer. Licensed under the `MIT License`_. -.. _@lru_cache: http://docs.python.org/3/library/functools.html#functools.lru_cache -.. _mutable: http://docs.python.org/dev/glossary.html#term-mutable -.. _mapping: http://docs.python.org/dev/glossary.html#term-mapping -.. _cache algorithm: http://en.wikipedia.org/wiki/Cache_algorithms +.. _@lru_cache: https://docs.python.org/3/library/functools.html#functools.lru_cache +.. _mutable: https://docs.python.org/dev/glossary.html#term-mutable +.. _mapping: https://docs.python.org/dev/glossary.html#term-mapping +.. 
_cache algorithm: https://en.wikipedia.org/wiki/Cache_algorithms .. _PyPI: https://pypi.org/project/cachetools/ .. _Documentation: https://cachetools.readthedocs.io/ .. _Issue tracker: https://github.com/tkem/cachetools/issues/ .. _Source code: https://github.com/tkem/cachetools/ .. _Change log: https://github.com/tkem/cachetools/blob/master/CHANGELOG.rst -.. _MIT License: http://raw.github.com/tkem/cachetools/master/LICENSE +.. _MIT License: https://raw.github.com/tkem/cachetools/master/LICENSE -- cgit v1.2.3 From 187b13a6da3e04511f3ed0c59889597edaec7f04 Mon Sep 17 00:00:00 2001 From: Thomas Kemmer Date: Tue, 27 Apr 2021 07:12:43 +0200 Subject: Format code with black. --- README.rst | 4 ++++ cachetools/__init__.py | 20 ++++++++++---------- cachetools/cache.py | 4 ++-- cachetools/decorators.py | 14 ++++++++++++++ cachetools/fifo.py | 2 +- cachetools/func.py | 12 ++++++------ cachetools/keys.py | 2 +- cachetools/lfu.py | 4 ++-- cachetools/lru.py | 2 +- cachetools/mru.py | 2 +- cachetools/rr.py | 2 +- cachetools/ttl.py | 5 ++--- docs/conf.py | 18 +++++++++--------- setup.cfg | 5 ++++- tests/__init__.py | 10 +++++----- tests/test_func.py | 25 ++++++------------------- tests/test_keys.py | 3 +-- tests/test_method.py | 11 ++++------- tests/test_mru.py | 4 ++-- tests/test_wrapper.py | 8 ++------ tox.ini | 2 ++ 21 files changed, 80 insertions(+), 79 deletions(-) diff --git a/README.rst b/README.rst index 732e26f..5370b34 100644 --- a/README.rst +++ b/README.rst @@ -25,6 +25,10 @@ cachetools :target: https://raw.github.com/tkem/cachetools/master/LICENSE :alt: License +.. image:: https://img.shields.io/badge/code%20style-black-000000.svg + :target: https://github.com/psf/black + :alt: Code style: black + This module provides various memoizing collections and decorators, including variants of the Python Standard Library's `@lru_cache`_ function decorator. 
diff --git a/cachetools/__init__.py b/cachetools/__init__.py index 2e66e20..1c925cc 100644 --- a/cachetools/__init__.py +++ b/cachetools/__init__.py @@ -10,15 +10,15 @@ from .rr import RRCache from .ttl import TTLCache __all__ = ( - 'Cache', - 'FIFOCache', - 'LFUCache', - 'LRUCache', - 'MRUCache', - 'RRCache', - 'TTLCache', - 'cached', - 'cachedmethod' + "Cache", + "FIFOCache", + "LFUCache", + "LRUCache", + "MRUCache", + "RRCache", + "TTLCache", + "cached", + "cachedmethod", ) -__version__ = '4.2.1' +__version__ = "4.2.1" diff --git a/cachetools/cache.py b/cachetools/cache.py index ed3d268..0c81d06 100644 --- a/cachetools/cache.py +++ b/cachetools/cache.py @@ -32,7 +32,7 @@ class Cache(MutableMapping): self.__maxsize = maxsize def __repr__(self): - return '%s(%r, maxsize=%r, currsize=%r)' % ( + return "%s(%r, maxsize=%r, currsize=%r)" % ( self.__class__.__name__, list(self.__data.items()), self.__maxsize, @@ -49,7 +49,7 @@ class Cache(MutableMapping): maxsize = self.__maxsize size = self.getsizeof(value) if size > maxsize: - raise ValueError('value too large') + raise ValueError("value too large") if key not in self.__data or self.__size[key] < size: while self.__currsize + size > maxsize: self.popitem() diff --git a/cachetools/decorators.py b/cachetools/decorators.py index 217b9a8..3e78603 100644 --- a/cachetools/decorators.py +++ b/cachetools/decorators.py @@ -8,11 +8,15 @@ def cached(cache, key=hashkey, lock=None): results in a cache. 
""" + def decorator(func): if cache is None: + def wrapper(*args, **kwargs): return func(*args, **kwargs) + elif lock is None: + def wrapper(*args, **kwargs): k = key(*args, **kwargs) try: @@ -25,7 +29,9 @@ def cached(cache, key=hashkey, lock=None): except ValueError: pass # value too large return v + else: + def wrapper(*args, **kwargs): k = key(*args, **kwargs) try: @@ -40,7 +46,9 @@ def cached(cache, key=hashkey, lock=None): return cache.setdefault(k, v) except ValueError: return v # value too large + return functools.update_wrapper(wrapper, func) + return decorator @@ -49,8 +57,10 @@ def cachedmethod(cache, key=hashkey, lock=None): callable that saves results in a cache. """ + def decorator(method): if lock is None: + def wrapper(self, *args, **kwargs): c = cache(self) if c is None: @@ -66,7 +76,9 @@ def cachedmethod(cache, key=hashkey, lock=None): except ValueError: pass # value too large return v + else: + def wrapper(self, *args, **kwargs): c = cache(self) if c is None: @@ -84,5 +96,7 @@ def cachedmethod(cache, key=hashkey, lock=None): return c.setdefault(k, v) except ValueError: return v # value too large + return functools.update_wrapper(wrapper, method) + return decorator diff --git a/cachetools/fifo.py b/cachetools/fifo.py index 9f254f1..e7c377e 100644 --- a/cachetools/fifo.py +++ b/cachetools/fifo.py @@ -26,6 +26,6 @@ class FIFOCache(Cache): try: key = next(iter(self.__order)) except StopIteration: - raise KeyError('%s is empty' % type(self).__name__) from None + raise KeyError("%s is empty" % type(self).__name__) from None else: return (key, self.pop(key)) diff --git a/cachetools/func.py b/cachetools/func.py index 2be517e..57fb72d 100644 --- a/cachetools/func.py +++ b/cachetools/func.py @@ -19,16 +19,15 @@ from .mru import MRUCache from .rr import RRCache from .ttl import TTLCache -__all__ = ('lfu_cache', 'lru_cache', 'mru_cache', 'rr_cache', 'ttl_cache') +__all__ = ("lfu_cache", "lru_cache", "mru_cache", "rr_cache", "ttl_cache") -_CacheInfo = 
collections.namedtuple('CacheInfo', [ - 'hits', 'misses', 'maxsize', 'currsize' -]) +_CacheInfo = collections.namedtuple( + "CacheInfo", ["hits", "misses", "maxsize", "currsize"] +) class _UnboundCache(dict): - @property def maxsize(self): return None @@ -88,9 +87,10 @@ def _cache(cache, typed): wrapper.cache_info = cache_info wrapper.cache_clear = cache_clear - wrapper.cache_parameters = lambda: {'maxsize': maxsize, 'typed': typed} + wrapper.cache_parameters = lambda: {"maxsize": maxsize, "typed": typed} functools.update_wrapper(wrapper, func) return wrapper + return decorator diff --git a/cachetools/keys.py b/cachetools/keys.py index 355d742..13630a4 100644 --- a/cachetools/keys.py +++ b/cachetools/keys.py @@ -1,6 +1,6 @@ """Key functions for memoizing decorators.""" -__all__ = ('hashkey', 'typedkey') +__all__ = ("hashkey", "typedkey") class _HashedTuple(tuple): diff --git a/cachetools/lfu.py b/cachetools/lfu.py index 894a326..6289b5c 100644 --- a/cachetools/lfu.py +++ b/cachetools/lfu.py @@ -27,8 +27,8 @@ class LFUCache(Cache): def popitem(self): """Remove and return the `(key, value)` pair least frequently used.""" try: - (key, _), = self.__counter.most_common(1) + ((key, _),) = self.__counter.most_common(1) except ValueError: - raise KeyError('%s is empty' % type(self).__name__) from None + raise KeyError("%s is empty" % type(self).__name__) from None else: return (key, self.pop(key)) diff --git a/cachetools/lru.py b/cachetools/lru.py index 33749d1..dbbe787 100644 --- a/cachetools/lru.py +++ b/cachetools/lru.py @@ -29,7 +29,7 @@ class LRUCache(Cache): try: key = next(iter(self.__order)) except StopIteration: - raise KeyError('%s is empty' % type(self).__name__) from None + raise KeyError("%s is empty" % type(self).__name__) from None else: return (key, self.pop(key)) diff --git a/cachetools/mru.py b/cachetools/mru.py index 41e2aa1..92ec6db 100644 --- a/cachetools/mru.py +++ b/cachetools/mru.py @@ -29,7 +29,7 @@ class MRUCache(Cache): try: key = 
next(iter(self.__order)) except StopIteration: - raise KeyError('%s is empty' % type(self).__name__) from None + raise KeyError("%s is empty" % type(self).__name__) from None else: return (key, self.pop(key)) diff --git a/cachetools/rr.py b/cachetools/rr.py index 5b47e87..561dbe5 100644 --- a/cachetools/rr.py +++ b/cachetools/rr.py @@ -29,6 +29,6 @@ class RRCache(Cache): try: key = self.__choice(list(self)) except IndexError: - raise KeyError('%s is empty' % type(self).__name__) from None + raise KeyError("%s is empty" % type(self).__name__) from None else: return (key, self.pop(key)) diff --git a/cachetools/ttl.py b/cachetools/ttl.py index 528a085..72f6d52 100644 --- a/cachetools/ttl.py +++ b/cachetools/ttl.py @@ -6,7 +6,7 @@ from .cache import Cache class _Link(object): - __slots__ = ('key', 'expire', 'next', 'prev') + __slots__ = ("key", "expire", "next", "prev") def __init__(self, key=None, expire=None): self.key = key @@ -23,7 +23,6 @@ class _Link(object): class _Timer(object): - def __init__(self, timer): self.__timer = timer self.__nesting = 0 @@ -198,7 +197,7 @@ class TTLCache(Cache): try: key = next(iter(self.__links)) except StopIteration: - raise KeyError('%s is empty' % type(self).__name__) from None + raise KeyError("%s is empty" % type(self).__name__) from None else: return (key, self.pop(key)) diff --git a/docs/conf.py b/docs/conf.py index 9efbd40..f55e5a5 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -8,17 +8,17 @@ def get_version(): return cp["metadata"]["version"] -project = 'cachetools' -copyright = '2014-2021 Thomas Kemmer' +project = "cachetools" +copyright = "2014-2021 Thomas Kemmer" version = get_version() release = version extensions = [ - 'sphinx.ext.autodoc', - 'sphinx.ext.coverage', - 'sphinx.ext.doctest', - 'sphinx.ext.todo' + "sphinx.ext.autodoc", + "sphinx.ext.coverage", + "sphinx.ext.doctest", + "sphinx.ext.todo", ] -exclude_patterns = ['_build'] -master_doc = 'index' -html_theme = 'default' +exclude_patterns = ["_build"] 
+master_doc = "index" +html_theme = "default" diff --git a/setup.cfg b/setup.cfg index 5e40883..bd7fb73 100644 --- a/setup.cfg +++ b/setup.cfg @@ -33,7 +33,10 @@ exclude = tests.* [flake8] -exclude = .git, .tox +max-line-length = 80 +exclude = .git, .tox, build +select = C, E, F, W, B, B950, I, N +ignore = E501, W503 [build_sphinx] source-dir = docs/ diff --git a/tests/__init__.py b/tests/__init__.py index 70a0f03..bfd5ec6 100644 --- a/tests/__init__.py +++ b/tests/__init__.py @@ -12,7 +12,7 @@ class CacheTestMixin(object): self.assertEqual(1, cache.maxsize) self.assertEqual(0, cache.currsize) self.assertEqual(1, cache.getsizeof(None)) - self.assertEqual(1, cache.getsizeof('')) + self.assertEqual(1, cache.getsizeof("")) self.assertEqual(1, cache.getsizeof(0)) self.assertTrue(repr(cache).startswith(cache.__class__.__name__)) @@ -47,10 +47,10 @@ class CacheTestMixin(object): self.assertEqual(1, cache[1]) self.assertEqual(2, cache[2]) - cache.update({1: 'a', 2: 'b'}) + cache.update({1: "a", 2: "b"}) self.assertEqual(2, len(cache)) - self.assertEqual('a', cache[1]) - self.assertEqual('b', cache[2]) + self.assertEqual("a", cache[1]) + self.assertEqual("b", cache[2]) def test_delete(self): cache = self.Cache(maxsize=2) @@ -108,7 +108,7 @@ class CacheTestMixin(object): with self.assertRaises(KeyError): cache.popitem() - @unittest.skipUnless(sys.version_info >= (3, 7), 'requires Python 3.7') + @unittest.skipUnless(sys.version_info >= (3, 7), "requires Python 3.7") def test_popitem_exception_context(self): # since Python 3.7, MutableMapping.popitem() suppresses # exception context as implementation detail diff --git a/tests/test_func.py b/tests/test_func.py index 39dce77..721d5a6 100644 --- a/tests/test_func.py +++ b/tests/test_func.py @@ -4,15 +4,12 @@ import cachetools.func class DecoratorTestMixin(object): - def decorator(self, maxsize, **kwargs): return self.DECORATOR(maxsize, **kwargs) def test_decorator(self): cached = self.decorator(maxsize=2)(lambda n: n) - 
self.assertEqual(cached.cache_parameters(), { - 'maxsize': 2, 'typed': False - }) + self.assertEqual(cached.cache_parameters(), {"maxsize": 2, "typed": False}) self.assertEqual(cached.cache_info(), (0, 0, 2, 0)) self.assertEqual(cached(1), 1) self.assertEqual(cached.cache_info(), (0, 1, 2, 1)) @@ -23,9 +20,7 @@ class DecoratorTestMixin(object): def test_decorator_clear(self): cached = self.decorator(maxsize=2)(lambda n: n) - self.assertEqual(cached.cache_parameters(), { - 'maxsize': 2, 'typed': False - }) + self.assertEqual(cached.cache_parameters(), {"maxsize": 2, "typed": False}) self.assertEqual(cached.cache_info(), (0, 0, 2, 0)) self.assertEqual(cached(1), 1) self.assertEqual(cached.cache_info(), (0, 1, 2, 1)) @@ -36,9 +31,7 @@ class DecoratorTestMixin(object): def test_decorator_nocache(self): cached = self.decorator(maxsize=0)(lambda n: n) - self.assertEqual(cached.cache_parameters(), { - 'maxsize': 0, 'typed': False - }) + self.assertEqual(cached.cache_parameters(), {"maxsize": 0, "typed": False}) self.assertEqual(cached.cache_info(), (0, 0, 0, 0)) self.assertEqual(cached(1), 1) self.assertEqual(cached.cache_info(), (0, 1, 0, 0)) @@ -49,9 +42,7 @@ class DecoratorTestMixin(object): def test_decorator_unbound(self): cached = self.decorator(maxsize=None)(lambda n: n) - self.assertEqual(cached.cache_parameters(), { - 'maxsize': None, 'typed': False - }) + self.assertEqual(cached.cache_parameters(), {"maxsize": None, "typed": False}) self.assertEqual(cached.cache_info(), (0, 0, None, 0)) self.assertEqual(cached(1), 1) self.assertEqual(cached.cache_info(), (0, 1, None, 1)) @@ -62,9 +53,7 @@ class DecoratorTestMixin(object): def test_decorator_typed(self): cached = self.decorator(maxsize=2, typed=True)(lambda n: n) - self.assertEqual(cached.cache_parameters(), { - 'maxsize': 2, 'typed': True - }) + self.assertEqual(cached.cache_parameters(), {"maxsize": 2, "typed": True}) self.assertEqual(cached.cache_info(), (0, 0, 2, 0)) self.assertEqual(cached(1), 1) 
self.assertEqual(cached.cache_info(), (0, 1, 2, 1)) @@ -77,9 +66,7 @@ class DecoratorTestMixin(object): def test_decorator_user_function(self): cached = self.decorator(lambda n: n) - self.assertEqual(cached.cache_parameters(), { - 'maxsize': 128, 'typed': False - }) + self.assertEqual(cached.cache_parameters(), {"maxsize": 128, "typed": False}) self.assertEqual(cached.cache_info(), (0, 0, 128, 0)) self.assertEqual(cached(1), 1) self.assertEqual(cached.cache_info(), (0, 1, 128, 1)) diff --git a/tests/test_keys.py b/tests/test_keys.py index 8137704..892a620 100644 --- a/tests/test_keys.py +++ b/tests/test_keys.py @@ -4,7 +4,6 @@ import cachetools.keys class CacheKeysTest(unittest.TestCase): - def test_hashkey(self, key=cachetools.keys.hashkey): self.assertEqual(key(), key()) self.assertEqual(hash(key()), hash(key())) @@ -47,7 +46,7 @@ class CacheKeysTest(unittest.TestCase): def test_pickle(self, key=cachetools.keys.hashkey): import pickle - for k in [key(), key('abc'), key('abc', 123), key('abc', q='abc')]: + for k in [key(), key("abc"), key("abc", 123), key("abc", q="abc")]: # white-box test: assert cached hash value is not pickled self.assertEqual(len(k.__dict__), 0) h = hash(k) diff --git a/tests/test_method.py b/tests/test_method.py index 9252fef..235eca4 100644 --- a/tests/test_method.py +++ b/tests/test_method.py @@ -5,18 +5,17 @@ from cachetools import LRUCache, cachedmethod, keys class Cached(object): - def __init__(self, cache, count=0): self.cache = cache self.count = count - @cachedmethod(operator.attrgetter('cache')) + @cachedmethod(operator.attrgetter("cache")) def get(self, value): count = self.count self.count += 1 return count - @cachedmethod(operator.attrgetter('cache'), key=keys.typedkey) + @cachedmethod(operator.attrgetter("cache"), key=keys.typedkey) def get_typed(self, value): count = self.count self.count += 1 @@ -24,16 +23,15 @@ class Cached(object): # https://github.com/tkem/cachetools/issues/107 def __hash__(self): - raise 
TypeError('unhashable type') + raise TypeError("unhashable type") class Locked(object): - def __init__(self, cache): self.cache = cache self.count = 0 - @cachedmethod(operator.attrgetter('cache'), lock=lambda self: self) + @cachedmethod(operator.attrgetter("cache"), lock=lambda self: self) def get(self, value): return self.count @@ -45,7 +43,6 @@ class Locked(object): class CachedMethodTest(unittest.TestCase): - def test_dict(self): cached = Cached({}) diff --git a/tests/test_mru.py b/tests/test_mru.py index 3a9f4d3..d11dba4 100644 --- a/tests/test_mru.py +++ b/tests/test_mru.py @@ -17,7 +17,7 @@ class MRUCacheTest(unittest.TestCase, CacheTestMixin): cache[3] = 3 # Evicts 1 because nothing's been used yet assert len(cache) == 2 - assert 1 not in cache, 'Wrong key was evicted. Should have been `1`.' + assert 1 not in cache, "Wrong key was evicted. Should have been '1'." assert 2 in cache assert 3 in cache @@ -29,7 +29,7 @@ class MRUCacheTest(unittest.TestCase, CacheTestMixin): cache[1] cache[2] cache[3] = 3 # Evicts 2 - assert 2 not in cache, 'Wrong key was evicted. Should have been `2`.' + assert 2 not in cache, "Wrong key was evicted. Should have been '2'." 
assert 1 in cache assert 3 in cache diff --git a/tests/test_wrapper.py b/tests/test_wrapper.py index a6e649c..e154ff8 100644 --- a/tests/test_wrapper.py +++ b/tests/test_wrapper.py @@ -5,12 +5,11 @@ import cachetools.keys class DecoratorTestMixin(object): - def cache(self, minsize): raise NotImplementedError def func(self, *args, **kwargs): - if hasattr(self, 'count'): + if hasattr(self, "count"): self.count += 1 else: self.count = 0 @@ -101,7 +100,6 @@ class DecoratorTestMixin(object): class CacheWrapperTest(unittest.TestCase, DecoratorTestMixin): - def cache(self, minsize): return cachetools.Cache(maxsize=minsize) @@ -138,13 +136,11 @@ class CacheWrapperTest(unittest.TestCase, DecoratorTestMixin): class DictWrapperTest(unittest.TestCase, DecoratorTestMixin): - def cache(self, minsize): return dict() class NoneWrapperTest(unittest.TestCase): - def func(self, *args, **kwargs): return args + tuple(kwargs.items()) @@ -154,4 +150,4 @@ class NoneWrapperTest(unittest.TestCase): self.assertEqual(wrapper(0), (0,)) self.assertEqual(wrapper(1), (1,)) - self.assertEqual(wrapper(1, foo='bar'), (1, ('foo', 'bar'))) + self.assertEqual(wrapper(1, foo="bar"), (1, ("foo", "bar"))) diff --git a/tox.ini b/tox.ini index 2ae20e1..e2315db 100644 --- a/tox.ini +++ b/tox.ini @@ -31,6 +31,8 @@ commands = [testenv:flake8] deps = flake8 + flake8-black; python_version >= "3.6" and implementation_name == "cpython" + flake8-bugbear flake8-import-order commands = flake8 -- cgit v1.2.3 From a34c227c26070f546b4201745d826b3266801c1f Mon Sep 17 00:00:00 2001 From: Thomas Kemmer Date: Tue, 27 Apr 2021 07:31:10 +0200 Subject: Remove Python 2 remnants. 
--- cachetools/cache.py | 2 +- cachetools/ttl.py | 6 +++--- tests/__init__.py | 2 +- tests/test_func.py | 2 +- tests/test_method.py | 4 ++-- tests/test_wrapper.py | 6 +++--- 6 files changed, 11 insertions(+), 11 deletions(-) diff --git a/cachetools/cache.py b/cachetools/cache.py index 0c81d06..973d50b 100644 --- a/cachetools/cache.py +++ b/cachetools/cache.py @@ -1,7 +1,7 @@ from collections.abc import MutableMapping -class _DefaultSize(object): +class _DefaultSize: __slots__ = () diff --git a/cachetools/ttl.py b/cachetools/ttl.py index 72f6d52..eef8877 100644 --- a/cachetools/ttl.py +++ b/cachetools/ttl.py @@ -4,7 +4,7 @@ import time from .cache import Cache -class _Link(object): +class _Link: __slots__ = ("key", "expire", "next", "prev") @@ -22,7 +22,7 @@ class _Link(object): next.prev = prev -class _Timer(object): +class _Timer: def __init__(self, timer): self.__timer = timer self.__nesting = 0 @@ -143,7 +143,7 @@ class TTLCache(Cache): def currsize(self): with self.__timer as time: self.expire(time) - return super(TTLCache, self).currsize + return super().currsize @property def timer(self): diff --git a/tests/__init__.py b/tests/__init__.py index bfd5ec6..35ac81a 100644 --- a/tests/__init__.py +++ b/tests/__init__.py @@ -2,7 +2,7 @@ import sys import unittest -class CacheTestMixin(object): +class CacheTestMixin: Cache = None diff --git a/tests/test_func.py b/tests/test_func.py index 721d5a6..72e7589 100644 --- a/tests/test_func.py +++ b/tests/test_func.py @@ -3,7 +3,7 @@ import unittest import cachetools.func -class DecoratorTestMixin(object): +class DecoratorTestMixin: def decorator(self, maxsize, **kwargs): return self.DECORATOR(maxsize, **kwargs) diff --git a/tests/test_method.py b/tests/test_method.py index 235eca4..b41dac0 100644 --- a/tests/test_method.py +++ b/tests/test_method.py @@ -4,7 +4,7 @@ import unittest from cachetools import LRUCache, cachedmethod, keys -class Cached(object): +class Cached: def __init__(self, cache, count=0): self.cache = cache 
self.count = count @@ -26,7 +26,7 @@ class Cached(object): raise TypeError("unhashable type") -class Locked(object): +class Locked: def __init__(self, cache): self.cache = cache self.count = 0 diff --git a/tests/test_wrapper.py b/tests/test_wrapper.py index e154ff8..37af16b 100644 --- a/tests/test_wrapper.py +++ b/tests/test_wrapper.py @@ -4,7 +4,7 @@ import cachetools import cachetools.keys -class DecoratorTestMixin(object): +class DecoratorTestMixin: def cache(self, minsize): raise NotImplementedError @@ -76,7 +76,7 @@ class DecoratorTestMixin(object): self.assertEqual(len(cache), 3) def test_decorator_lock(self): - class Lock(object): + class Lock: count = 0 @@ -114,7 +114,7 @@ class CacheWrapperTest(unittest.TestCase, DecoratorTestMixin): self.assertEqual(len(cache), 0) def test_zero_size_cache_decorator_lock(self): - class Lock(object): + class Lock: count = 0 -- cgit v1.2.3 From f8fbd6da171de6b058c22497fd7dca0d86928839 Mon Sep 17 00:00:00 2001 From: Thomas Kemmer Date: Tue, 27 Apr 2021 18:03:40 +0200 Subject: Fix copyright year. 
--- LICENSE | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/LICENSE b/LICENSE index 0dc1864..fc2146e 100644 --- a/LICENSE +++ b/LICENSE @@ -1,6 +1,6 @@ The MIT License (MIT) -Copyright (c) 2014-2020 Thomas Kemmer +Copyright (c) 2014-2021 Thomas Kemmer Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in -- cgit v1.2.3 From 63f28698a99ddfb1f2abc95b2b51e48867d92e17 Mon Sep 17 00:00:00 2001 From: Thomas Kemmer Date: Tue, 27 Apr 2021 18:04:03 +0200 Subject: Add pyproject.toml --- pyproject.toml | 3 +++ 1 file changed, 3 insertions(+) create mode 100644 pyproject.toml diff --git a/pyproject.toml b/pyproject.toml new file mode 100644 index 0000000..f87c03a --- /dev/null +++ b/pyproject.toml @@ -0,0 +1,3 @@ +[build-system] +requires = ["setuptools >= 46.4.0", "wheel"] +build-backend = "setuptools.build_meta" -- cgit v1.2.3 From 14858da844668605b128794436d4767c5ab55835 Mon Sep 17 00:00:00 2001 From: Thomas Kemmer Date: Tue, 27 Apr 2021 18:04:47 +0200 Subject: Single source package version. --- setup.cfg | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup.cfg b/setup.cfg index bd7fb73..eabb604 100644 --- a/setup.cfg +++ b/setup.cfg @@ -1,6 +1,6 @@ [metadata] name = cachetools -version = 4.2.1 +version = attr: cachetools.__version__ url = https://github.com/tkem/cachetools/ author = Thomas Kemmer author_email = tkemmer@computer.org -- cgit v1.2.3 From 6b56d1f2c91b145728afedb22a394bca679b82ed Mon Sep 17 00:00:00 2001 From: Thomas Kemmer Date: Tue, 27 Apr 2021 18:10:34 +0200 Subject: Add src directory. 
--- cachetools/__init__.py | 24 ----- cachetools/cache.py | 117 ------------------------ cachetools/decorators.py | 102 --------------------- cachetools/fifo.py | 31 ------- cachetools/func.py | 176 ------------------------------------ cachetools/keys.py | 52 ----------- cachetools/lfu.py | 34 ------- cachetools/lru.py | 40 --------- cachetools/mru.py | 40 --------- cachetools/rr.py | 34 ------- cachetools/ttl.py | 207 ------------------------------------------- setup.cfg | 6 +- src/cachetools/__init__.py | 24 +++++ src/cachetools/cache.py | 117 ++++++++++++++++++++++++ src/cachetools/decorators.py | 102 +++++++++++++++++++++ src/cachetools/fifo.py | 31 +++++++ src/cachetools/func.py | 176 ++++++++++++++++++++++++++++++++++++ src/cachetools/keys.py | 52 +++++++++++ src/cachetools/lfu.py | 34 +++++++ src/cachetools/lru.py | 40 +++++++++ src/cachetools/mru.py | 40 +++++++++ src/cachetools/rr.py | 34 +++++++ src/cachetools/ttl.py | 207 +++++++++++++++++++++++++++++++++++++++++++ 23 files changed, 860 insertions(+), 860 deletions(-) delete mode 100644 cachetools/__init__.py delete mode 100644 cachetools/cache.py delete mode 100644 cachetools/decorators.py delete mode 100644 cachetools/fifo.py delete mode 100644 cachetools/func.py delete mode 100644 cachetools/keys.py delete mode 100644 cachetools/lfu.py delete mode 100644 cachetools/lru.py delete mode 100644 cachetools/mru.py delete mode 100644 cachetools/rr.py delete mode 100644 cachetools/ttl.py create mode 100644 src/cachetools/__init__.py create mode 100644 src/cachetools/cache.py create mode 100644 src/cachetools/decorators.py create mode 100644 src/cachetools/fifo.py create mode 100644 src/cachetools/func.py create mode 100644 src/cachetools/keys.py create mode 100644 src/cachetools/lfu.py create mode 100644 src/cachetools/lru.py create mode 100644 src/cachetools/mru.py create mode 100644 src/cachetools/rr.py create mode 100644 src/cachetools/ttl.py diff --git a/cachetools/__init__.py b/cachetools/__init__.py 
deleted file mode 100644 index 1c925cc..0000000 --- a/cachetools/__init__.py +++ /dev/null @@ -1,24 +0,0 @@ -"""Extensible memoizing collections and decorators.""" - -from .cache import Cache -from .decorators import cached, cachedmethod -from .fifo import FIFOCache -from .lfu import LFUCache -from .lru import LRUCache -from .mru import MRUCache -from .rr import RRCache -from .ttl import TTLCache - -__all__ = ( - "Cache", - "FIFOCache", - "LFUCache", - "LRUCache", - "MRUCache", - "RRCache", - "TTLCache", - "cached", - "cachedmethod", -) - -__version__ = "4.2.1" diff --git a/cachetools/cache.py b/cachetools/cache.py deleted file mode 100644 index 973d50b..0000000 --- a/cachetools/cache.py +++ /dev/null @@ -1,117 +0,0 @@ -from collections.abc import MutableMapping - - -class _DefaultSize: - - __slots__ = () - - def __getitem__(self, _): - return 1 - - def __setitem__(self, _, value): - assert value == 1 - - def pop(self, _): - return 1 - - -class Cache(MutableMapping): - """Mutable mapping to serve as a simple cache or cache base class.""" - - __marker = object() - - __size = _DefaultSize() - - def __init__(self, maxsize, getsizeof=None): - if getsizeof: - self.getsizeof = getsizeof - if self.getsizeof is not Cache.getsizeof: - self.__size = dict() - self.__data = dict() - self.__currsize = 0 - self.__maxsize = maxsize - - def __repr__(self): - return "%s(%r, maxsize=%r, currsize=%r)" % ( - self.__class__.__name__, - list(self.__data.items()), - self.__maxsize, - self.__currsize, - ) - - def __getitem__(self, key): - try: - return self.__data[key] - except KeyError: - return self.__missing__(key) - - def __setitem__(self, key, value): - maxsize = self.__maxsize - size = self.getsizeof(value) - if size > maxsize: - raise ValueError("value too large") - if key not in self.__data or self.__size[key] < size: - while self.__currsize + size > maxsize: - self.popitem() - if key in self.__data: - diffsize = size - self.__size[key] - else: - diffsize = size - self.__data[key] 
= value - self.__size[key] = size - self.__currsize += diffsize - - def __delitem__(self, key): - size = self.__size.pop(key) - del self.__data[key] - self.__currsize -= size - - def __contains__(self, key): - return key in self.__data - - def __missing__(self, key): - raise KeyError(key) - - def __iter__(self): - return iter(self.__data) - - def __len__(self): - return len(self.__data) - - def get(self, key, default=None): - if key in self: - return self[key] - else: - return default - - def pop(self, key, default=__marker): - if key in self: - value = self[key] - del self[key] - elif default is self.__marker: - raise KeyError(key) - else: - value = default - return value - - def setdefault(self, key, default=None): - if key in self: - value = self[key] - else: - self[key] = value = default - return value - - @property - def maxsize(self): - """The maximum size of the cache.""" - return self.__maxsize - - @property - def currsize(self): - """The current size of the cache.""" - return self.__currsize - - @staticmethod - def getsizeof(value): - """Return the size of a cache element's value.""" - return 1 diff --git a/cachetools/decorators.py b/cachetools/decorators.py deleted file mode 100644 index 3e78603..0000000 --- a/cachetools/decorators.py +++ /dev/null @@ -1,102 +0,0 @@ -import functools - -from .keys import hashkey - - -def cached(cache, key=hashkey, lock=None): - """Decorator to wrap a function with a memoizing callable that saves - results in a cache. 
- - """ - - def decorator(func): - if cache is None: - - def wrapper(*args, **kwargs): - return func(*args, **kwargs) - - elif lock is None: - - def wrapper(*args, **kwargs): - k = key(*args, **kwargs) - try: - return cache[k] - except KeyError: - pass # key not found - v = func(*args, **kwargs) - try: - cache[k] = v - except ValueError: - pass # value too large - return v - - else: - - def wrapper(*args, **kwargs): - k = key(*args, **kwargs) - try: - with lock: - return cache[k] - except KeyError: - pass # key not found - v = func(*args, **kwargs) - # in case of a race, prefer the item already in the cache - try: - with lock: - return cache.setdefault(k, v) - except ValueError: - return v # value too large - - return functools.update_wrapper(wrapper, func) - - return decorator - - -def cachedmethod(cache, key=hashkey, lock=None): - """Decorator to wrap a class or instance method with a memoizing - callable that saves results in a cache. - - """ - - def decorator(method): - if lock is None: - - def wrapper(self, *args, **kwargs): - c = cache(self) - if c is None: - return method(self, *args, **kwargs) - k = key(*args, **kwargs) - try: - return c[k] - except KeyError: - pass # key not found - v = method(self, *args, **kwargs) - try: - c[k] = v - except ValueError: - pass # value too large - return v - - else: - - def wrapper(self, *args, **kwargs): - c = cache(self) - if c is None: - return method(self, *args, **kwargs) - k = key(*args, **kwargs) - try: - with lock(self): - return c[k] - except KeyError: - pass # key not found - v = method(self, *args, **kwargs) - # in case of a race, prefer the item already in the cache - try: - with lock(self): - return c.setdefault(k, v) - except ValueError: - return v # value too large - - return functools.update_wrapper(wrapper, method) - - return decorator diff --git a/cachetools/fifo.py b/cachetools/fifo.py deleted file mode 100644 index e7c377e..0000000 --- a/cachetools/fifo.py +++ /dev/null @@ -1,31 +0,0 @@ -import 
collections - -from .cache import Cache - - -class FIFOCache(Cache): - """First In First Out (FIFO) cache implementation.""" - - def __init__(self, maxsize, getsizeof=None): - Cache.__init__(self, maxsize, getsizeof) - self.__order = collections.OrderedDict() - - def __setitem__(self, key, value, cache_setitem=Cache.__setitem__): - cache_setitem(self, key, value) - try: - self.__order.move_to_end(key) - except KeyError: - self.__order[key] = None - - def __delitem__(self, key, cache_delitem=Cache.__delitem__): - cache_delitem(self, key) - del self.__order[key] - - def popitem(self): - """Remove and return the `(key, value)` pair first inserted.""" - try: - key = next(iter(self.__order)) - except StopIteration: - raise KeyError("%s is empty" % type(self).__name__) from None - else: - return (key, self.pop(key)) diff --git a/cachetools/func.py b/cachetools/func.py deleted file mode 100644 index 57fb72d..0000000 --- a/cachetools/func.py +++ /dev/null @@ -1,176 +0,0 @@ -"""`functools.lru_cache` compatible memoizing function decorators.""" - -import collections -import functools -import math -import random -import time - -try: - from threading import RLock -except ImportError: # pragma: no cover - from dummy_threading import RLock - -from . 
import keys -from .fifo import FIFOCache -from .lfu import LFUCache -from .lru import LRUCache -from .mru import MRUCache -from .rr import RRCache -from .ttl import TTLCache - -__all__ = ("lfu_cache", "lru_cache", "mru_cache", "rr_cache", "ttl_cache") - - -_CacheInfo = collections.namedtuple( - "CacheInfo", ["hits", "misses", "maxsize", "currsize"] -) - - -class _UnboundCache(dict): - @property - def maxsize(self): - return None - - @property - def currsize(self): - return len(self) - - -class _UnboundTTLCache(TTLCache): - def __init__(self, ttl, timer): - TTLCache.__init__(self, math.inf, ttl, timer) - - @property - def maxsize(self): - return None - - -def _cache(cache, typed): - maxsize = cache.maxsize - - def decorator(func): - key = keys.typedkey if typed else keys.hashkey - lock = RLock() - stats = [0, 0] - - def wrapper(*args, **kwargs): - k = key(*args, **kwargs) - with lock: - try: - v = cache[k] - stats[0] += 1 - return v - except KeyError: - stats[1] += 1 - v = func(*args, **kwargs) - # in case of a race, prefer the item already in the cache - try: - with lock: - return cache.setdefault(k, v) - except ValueError: - return v # value too large - - def cache_info(): - with lock: - hits, misses = stats - maxsize = cache.maxsize - currsize = cache.currsize - return _CacheInfo(hits, misses, maxsize, currsize) - - def cache_clear(): - with lock: - try: - cache.clear() - finally: - stats[:] = [0, 0] - - wrapper.cache_info = cache_info - wrapper.cache_clear = cache_clear - wrapper.cache_parameters = lambda: {"maxsize": maxsize, "typed": typed} - functools.update_wrapper(wrapper, func) - return wrapper - - return decorator - - -def fifo_cache(maxsize=128, typed=False): - """Decorator to wrap a function with a memoizing callable that saves - up to `maxsize` results based on a First In First Out (FIFO) - algorithm. 
- - """ - if maxsize is None: - return _cache(_UnboundCache(), typed) - elif callable(maxsize): - return _cache(FIFOCache(128), typed)(maxsize) - else: - return _cache(FIFOCache(maxsize), typed) - - -def lfu_cache(maxsize=128, typed=False): - """Decorator to wrap a function with a memoizing callable that saves - up to `maxsize` results based on a Least Frequently Used (LFU) - algorithm. - - """ - if maxsize is None: - return _cache(_UnboundCache(), typed) - elif callable(maxsize): - return _cache(LFUCache(128), typed)(maxsize) - else: - return _cache(LFUCache(maxsize), typed) - - -def lru_cache(maxsize=128, typed=False): - """Decorator to wrap a function with a memoizing callable that saves - up to `maxsize` results based on a Least Recently Used (LRU) - algorithm. - - """ - if maxsize is None: - return _cache(_UnboundCache(), typed) - elif callable(maxsize): - return _cache(LRUCache(128), typed)(maxsize) - else: - return _cache(LRUCache(maxsize), typed) - - -def mru_cache(maxsize=128, typed=False): - """Decorator to wrap a function with a memoizing callable that saves - up to `maxsize` results based on a Most Recently Used (MRU) - algorithm. - """ - if maxsize is None: - return _cache(_UnboundCache(), typed) - elif callable(maxsize): - return _cache(MRUCache(128), typed)(maxsize) - else: - return _cache(MRUCache(maxsize), typed) - - -def rr_cache(maxsize=128, choice=random.choice, typed=False): - """Decorator to wrap a function with a memoizing callable that saves - up to `maxsize` results based on a Random Replacement (RR) - algorithm. 
- - """ - if maxsize is None: - return _cache(_UnboundCache(), typed) - elif callable(maxsize): - return _cache(RRCache(128, choice), typed)(maxsize) - else: - return _cache(RRCache(maxsize, choice), typed) - - -def ttl_cache(maxsize=128, ttl=600, timer=time.monotonic, typed=False): - """Decorator to wrap a function with a memoizing callable that saves - up to `maxsize` results based on a Least Recently Used (LRU) - algorithm with a per-item time-to-live (TTL) value. - """ - if maxsize is None: - return _cache(_UnboundTTLCache(ttl, timer), typed) - elif callable(maxsize): - return _cache(TTLCache(128, ttl, timer), typed)(maxsize) - else: - return _cache(TTLCache(maxsize, ttl, timer), typed) diff --git a/cachetools/keys.py b/cachetools/keys.py deleted file mode 100644 index 13630a4..0000000 --- a/cachetools/keys.py +++ /dev/null @@ -1,52 +0,0 @@ -"""Key functions for memoizing decorators.""" - -__all__ = ("hashkey", "typedkey") - - -class _HashedTuple(tuple): - """A tuple that ensures that hash() will be called no more than once - per element, since cache decorators will hash the key multiple - times on a cache miss. See also _HashedSeq in the standard - library functools implementation. 
- - """ - - __hashvalue = None - - def __hash__(self, hash=tuple.__hash__): - hashvalue = self.__hashvalue - if hashvalue is None: - self.__hashvalue = hashvalue = hash(self) - return hashvalue - - def __add__(self, other, add=tuple.__add__): - return _HashedTuple(add(self, other)) - - def __radd__(self, other, add=tuple.__add__): - return _HashedTuple(add(other, self)) - - def __getstate__(self): - return {} - - -# used for separating keyword arguments; we do not use an object -# instance here so identity is preserved when pickling/unpickling -_kwmark = (_HashedTuple,) - - -def hashkey(*args, **kwargs): - """Return a cache key for the specified hashable arguments.""" - - if kwargs: - return _HashedTuple(args + sum(sorted(kwargs.items()), _kwmark)) - else: - return _HashedTuple(args) - - -def typedkey(*args, **kwargs): - """Return a typed cache key for the specified hashable arguments.""" - - key = hashkey(*args, **kwargs) - key += tuple(type(v) for v in args) - key += tuple(type(v) for _, v in sorted(kwargs.items())) - return key diff --git a/cachetools/lfu.py b/cachetools/lfu.py deleted file mode 100644 index 6289b5c..0000000 --- a/cachetools/lfu.py +++ /dev/null @@ -1,34 +0,0 @@ -import collections - -from .cache import Cache - - -class LFUCache(Cache): - """Least Frequently Used (LFU) cache implementation.""" - - def __init__(self, maxsize, getsizeof=None): - Cache.__init__(self, maxsize, getsizeof) - self.__counter = collections.Counter() - - def __getitem__(self, key, cache_getitem=Cache.__getitem__): - value = cache_getitem(self, key) - if key in self: # __missing__ may not store item - self.__counter[key] -= 1 - return value - - def __setitem__(self, key, value, cache_setitem=Cache.__setitem__): - cache_setitem(self, key, value) - self.__counter[key] -= 1 - - def __delitem__(self, key, cache_delitem=Cache.__delitem__): - cache_delitem(self, key) - del self.__counter[key] - - def popitem(self): - """Remove and return the `(key, value)` pair least frequently 
used.""" - try: - ((key, _),) = self.__counter.most_common(1) - except ValueError: - raise KeyError("%s is empty" % type(self).__name__) from None - else: - return (key, self.pop(key)) diff --git a/cachetools/lru.py b/cachetools/lru.py deleted file mode 100644 index dbbe787..0000000 --- a/cachetools/lru.py +++ /dev/null @@ -1,40 +0,0 @@ -import collections - -from .cache import Cache - - -class LRUCache(Cache): - """Least Recently Used (LRU) cache implementation.""" - - def __init__(self, maxsize, getsizeof=None): - Cache.__init__(self, maxsize, getsizeof) - self.__order = collections.OrderedDict() - - def __getitem__(self, key, cache_getitem=Cache.__getitem__): - value = cache_getitem(self, key) - if key in self: # __missing__ may not store item - self.__update(key) - return value - - def __setitem__(self, key, value, cache_setitem=Cache.__setitem__): - cache_setitem(self, key, value) - self.__update(key) - - def __delitem__(self, key, cache_delitem=Cache.__delitem__): - cache_delitem(self, key) - del self.__order[key] - - def popitem(self): - """Remove and return the `(key, value)` pair least recently used.""" - try: - key = next(iter(self.__order)) - except StopIteration: - raise KeyError("%s is empty" % type(self).__name__) from None - else: - return (key, self.pop(key)) - - def __update(self, key): - try: - self.__order.move_to_end(key) - except KeyError: - self.__order[key] = None diff --git a/cachetools/mru.py b/cachetools/mru.py deleted file mode 100644 index 92ec6db..0000000 --- a/cachetools/mru.py +++ /dev/null @@ -1,40 +0,0 @@ -import collections - -from cachetools.cache import Cache - - -class MRUCache(Cache): - """Most Recently Used (MRU) cache implementation.""" - - def __init__(self, maxsize, getsizeof=None): - Cache.__init__(self, maxsize, getsizeof) - self.__order = collections.OrderedDict() - - def __getitem__(self, key, cache_getitem=Cache.__getitem__): - value = cache_getitem(self, key) - if key in self: # __missing__ may not store item - 
self.__update(key) - return value - - def __setitem__(self, key, value, cache_setitem=Cache.__setitem__): - cache_setitem(self, key, value) - self.__update(key) - - def __delitem__(self, key, cache_delitem=Cache.__delitem__): - cache_delitem(self, key) - del self.__order[key] - - def popitem(self): - """Remove and return the `(key, value)` pair most recently used.""" - try: - key = next(iter(self.__order)) - except StopIteration: - raise KeyError("%s is empty" % type(self).__name__) from None - else: - return (key, self.pop(key)) - - def __update(self, key): - try: - self.__order.move_to_end(key, last=False) - except KeyError: - self.__order[key] = None diff --git a/cachetools/rr.py b/cachetools/rr.py deleted file mode 100644 index 561dbe5..0000000 --- a/cachetools/rr.py +++ /dev/null @@ -1,34 +0,0 @@ -import random - -from .cache import Cache - - -# random.choice cannot be pickled in Python 2.7 -def _choice(seq): - return random.choice(seq) - - -class RRCache(Cache): - """Random Replacement (RR) cache implementation.""" - - def __init__(self, maxsize, choice=random.choice, getsizeof=None): - Cache.__init__(self, maxsize, getsizeof) - # TODO: use None as default, assing to self.choice directly? 
- if choice is random.choice: - self.__choice = _choice - else: - self.__choice = choice - - @property - def choice(self): - """The `choice` function used by the cache.""" - return self.__choice - - def popitem(self): - """Remove and return a random `(key, value)` pair.""" - try: - key = self.__choice(list(self)) - except IndexError: - raise KeyError("%s is empty" % type(self).__name__) from None - else: - return (key, self.pop(key)) diff --git a/cachetools/ttl.py b/cachetools/ttl.py deleted file mode 100644 index eef8877..0000000 --- a/cachetools/ttl.py +++ /dev/null @@ -1,207 +0,0 @@ -import collections -import time - -from .cache import Cache - - -class _Link: - - __slots__ = ("key", "expire", "next", "prev") - - def __init__(self, key=None, expire=None): - self.key = key - self.expire = expire - - def __reduce__(self): - return _Link, (self.key, self.expire) - - def unlink(self): - next = self.next - prev = self.prev - prev.next = next - next.prev = prev - - -class _Timer: - def __init__(self, timer): - self.__timer = timer - self.__nesting = 0 - - def __call__(self): - if self.__nesting == 0: - return self.__timer() - else: - return self.__time - - def __enter__(self): - if self.__nesting == 0: - self.__time = time = self.__timer() - else: - time = self.__time - self.__nesting += 1 - return time - - def __exit__(self, *exc): - self.__nesting -= 1 - - def __reduce__(self): - return _Timer, (self.__timer,) - - def __getattr__(self, name): - return getattr(self.__timer, name) - - -class TTLCache(Cache): - """LRU Cache implementation with per-item time-to-live (TTL) value.""" - - def __init__(self, maxsize, ttl, timer=time.monotonic, getsizeof=None): - Cache.__init__(self, maxsize, getsizeof) - self.__root = root = _Link() - root.prev = root.next = root - self.__links = collections.OrderedDict() - self.__timer = _Timer(timer) - self.__ttl = ttl - - def __contains__(self, key): - try: - link = self.__links[key] # no reordering - except KeyError: - return False - 
else: - return not (link.expire < self.__timer()) - - def __getitem__(self, key, cache_getitem=Cache.__getitem__): - try: - link = self.__getlink(key) - except KeyError: - expired = False - else: - expired = link.expire < self.__timer() - if expired: - return self.__missing__(key) - else: - return cache_getitem(self, key) - - def __setitem__(self, key, value, cache_setitem=Cache.__setitem__): - with self.__timer as time: - self.expire(time) - cache_setitem(self, key, value) - try: - link = self.__getlink(key) - except KeyError: - self.__links[key] = link = _Link(key) - else: - link.unlink() - link.expire = time + self.__ttl - link.next = root = self.__root - link.prev = prev = root.prev - prev.next = root.prev = link - - def __delitem__(self, key, cache_delitem=Cache.__delitem__): - cache_delitem(self, key) - link = self.__links.pop(key) - link.unlink() - if link.expire < self.__timer(): - raise KeyError(key) - - def __iter__(self): - root = self.__root - curr = root.next - while curr is not root: - # "freeze" time for iterator access - with self.__timer as time: - if not (curr.expire < time): - yield curr.key - curr = curr.next - - def __len__(self): - root = self.__root - curr = root.next - time = self.__timer() - count = len(self.__links) - while curr is not root and curr.expire < time: - count -= 1 - curr = curr.next - return count - - def __setstate__(self, state): - self.__dict__.update(state) - root = self.__root - root.prev = root.next = root - for link in sorted(self.__links.values(), key=lambda obj: obj.expire): - link.next = root - link.prev = prev = root.prev - prev.next = root.prev = link - self.expire(self.__timer()) - - def __repr__(self, cache_repr=Cache.__repr__): - with self.__timer as time: - self.expire(time) - return cache_repr(self) - - @property - def currsize(self): - with self.__timer as time: - self.expire(time) - return super().currsize - - @property - def timer(self): - """The timer function used by the cache.""" - return self.__timer - 
- @property - def ttl(self): - """The time-to-live value of the cache's items.""" - return self.__ttl - - def expire(self, time=None): - """Remove expired items from the cache.""" - if time is None: - time = self.__timer() - root = self.__root - curr = root.next - links = self.__links - cache_delitem = Cache.__delitem__ - while curr is not root and curr.expire < time: - cache_delitem(self, curr.key) - del links[curr.key] - next = curr.next - curr.unlink() - curr = next - - def clear(self): - with self.__timer as time: - self.expire(time) - Cache.clear(self) - - def get(self, *args, **kwargs): - with self.__timer: - return Cache.get(self, *args, **kwargs) - - def pop(self, *args, **kwargs): - with self.__timer: - return Cache.pop(self, *args, **kwargs) - - def setdefault(self, *args, **kwargs): - with self.__timer: - return Cache.setdefault(self, *args, **kwargs) - - def popitem(self): - """Remove and return the `(key, value)` pair least recently used that - has not already expired. - - """ - with self.__timer as time: - self.expire(time) - try: - key = next(iter(self.__links)) - except StopIteration: - raise KeyError("%s is empty" % type(self).__name__) from None - else: - return (key, self.pop(key)) - - def __getlink(self, key): - value = self.__links[key] - self.__links.move_to_end(key) - return value diff --git a/setup.cfg b/setup.cfg index eabb604..9f3f926 100644 --- a/setup.cfg +++ b/setup.cfg @@ -24,13 +24,13 @@ classifiers = Topic :: Software Development :: Libraries :: Python Modules [options] +package_dir = + = src packages = find: python_requires = ~= 3.5 [options.packages.find] -exclude = - tests - tests.* +where = src [flake8] max-line-length = 80 diff --git a/src/cachetools/__init__.py b/src/cachetools/__init__.py new file mode 100644 index 0000000..1c925cc --- /dev/null +++ b/src/cachetools/__init__.py @@ -0,0 +1,24 @@ +"""Extensible memoizing collections and decorators.""" + +from .cache import Cache +from .decorators import cached, cachedmethod 
+from .fifo import FIFOCache +from .lfu import LFUCache +from .lru import LRUCache +from .mru import MRUCache +from .rr import RRCache +from .ttl import TTLCache + +__all__ = ( + "Cache", + "FIFOCache", + "LFUCache", + "LRUCache", + "MRUCache", + "RRCache", + "TTLCache", + "cached", + "cachedmethod", +) + +__version__ = "4.2.1" diff --git a/src/cachetools/cache.py b/src/cachetools/cache.py new file mode 100644 index 0000000..973d50b --- /dev/null +++ b/src/cachetools/cache.py @@ -0,0 +1,117 @@ +from collections.abc import MutableMapping + + +class _DefaultSize: + + __slots__ = () + + def __getitem__(self, _): + return 1 + + def __setitem__(self, _, value): + assert value == 1 + + def pop(self, _): + return 1 + + +class Cache(MutableMapping): + """Mutable mapping to serve as a simple cache or cache base class.""" + + __marker = object() + + __size = _DefaultSize() + + def __init__(self, maxsize, getsizeof=None): + if getsizeof: + self.getsizeof = getsizeof + if self.getsizeof is not Cache.getsizeof: + self.__size = dict() + self.__data = dict() + self.__currsize = 0 + self.__maxsize = maxsize + + def __repr__(self): + return "%s(%r, maxsize=%r, currsize=%r)" % ( + self.__class__.__name__, + list(self.__data.items()), + self.__maxsize, + self.__currsize, + ) + + def __getitem__(self, key): + try: + return self.__data[key] + except KeyError: + return self.__missing__(key) + + def __setitem__(self, key, value): + maxsize = self.__maxsize + size = self.getsizeof(value) + if size > maxsize: + raise ValueError("value too large") + if key not in self.__data or self.__size[key] < size: + while self.__currsize + size > maxsize: + self.popitem() + if key in self.__data: + diffsize = size - self.__size[key] + else: + diffsize = size + self.__data[key] = value + self.__size[key] = size + self.__currsize += diffsize + + def __delitem__(self, key): + size = self.__size.pop(key) + del self.__data[key] + self.__currsize -= size + + def __contains__(self, key): + return key in 
self.__data + + def __missing__(self, key): + raise KeyError(key) + + def __iter__(self): + return iter(self.__data) + + def __len__(self): + return len(self.__data) + + def get(self, key, default=None): + if key in self: + return self[key] + else: + return default + + def pop(self, key, default=__marker): + if key in self: + value = self[key] + del self[key] + elif default is self.__marker: + raise KeyError(key) + else: + value = default + return value + + def setdefault(self, key, default=None): + if key in self: + value = self[key] + else: + self[key] = value = default + return value + + @property + def maxsize(self): + """The maximum size of the cache.""" + return self.__maxsize + + @property + def currsize(self): + """The current size of the cache.""" + return self.__currsize + + @staticmethod + def getsizeof(value): + """Return the size of a cache element's value.""" + return 1 diff --git a/src/cachetools/decorators.py b/src/cachetools/decorators.py new file mode 100644 index 0000000..3e78603 --- /dev/null +++ b/src/cachetools/decorators.py @@ -0,0 +1,102 @@ +import functools + +from .keys import hashkey + + +def cached(cache, key=hashkey, lock=None): + """Decorator to wrap a function with a memoizing callable that saves + results in a cache. 
+ + """ + + def decorator(func): + if cache is None: + + def wrapper(*args, **kwargs): + return func(*args, **kwargs) + + elif lock is None: + + def wrapper(*args, **kwargs): + k = key(*args, **kwargs) + try: + return cache[k] + except KeyError: + pass # key not found + v = func(*args, **kwargs) + try: + cache[k] = v + except ValueError: + pass # value too large + return v + + else: + + def wrapper(*args, **kwargs): + k = key(*args, **kwargs) + try: + with lock: + return cache[k] + except KeyError: + pass # key not found + v = func(*args, **kwargs) + # in case of a race, prefer the item already in the cache + try: + with lock: + return cache.setdefault(k, v) + except ValueError: + return v # value too large + + return functools.update_wrapper(wrapper, func) + + return decorator + + +def cachedmethod(cache, key=hashkey, lock=None): + """Decorator to wrap a class or instance method with a memoizing + callable that saves results in a cache. + + """ + + def decorator(method): + if lock is None: + + def wrapper(self, *args, **kwargs): + c = cache(self) + if c is None: + return method(self, *args, **kwargs) + k = key(*args, **kwargs) + try: + return c[k] + except KeyError: + pass # key not found + v = method(self, *args, **kwargs) + try: + c[k] = v + except ValueError: + pass # value too large + return v + + else: + + def wrapper(self, *args, **kwargs): + c = cache(self) + if c is None: + return method(self, *args, **kwargs) + k = key(*args, **kwargs) + try: + with lock(self): + return c[k] + except KeyError: + pass # key not found + v = method(self, *args, **kwargs) + # in case of a race, prefer the item already in the cache + try: + with lock(self): + return c.setdefault(k, v) + except ValueError: + return v # value too large + + return functools.update_wrapper(wrapper, method) + + return decorator diff --git a/src/cachetools/fifo.py b/src/cachetools/fifo.py new file mode 100644 index 0000000..e7c377e --- /dev/null +++ b/src/cachetools/fifo.py @@ -0,0 +1,31 @@ +import 
collections + +from .cache import Cache + + +class FIFOCache(Cache): + """First In First Out (FIFO) cache implementation.""" + + def __init__(self, maxsize, getsizeof=None): + Cache.__init__(self, maxsize, getsizeof) + self.__order = collections.OrderedDict() + + def __setitem__(self, key, value, cache_setitem=Cache.__setitem__): + cache_setitem(self, key, value) + try: + self.__order.move_to_end(key) + except KeyError: + self.__order[key] = None + + def __delitem__(self, key, cache_delitem=Cache.__delitem__): + cache_delitem(self, key) + del self.__order[key] + + def popitem(self): + """Remove and return the `(key, value)` pair first inserted.""" + try: + key = next(iter(self.__order)) + except StopIteration: + raise KeyError("%s is empty" % type(self).__name__) from None + else: + return (key, self.pop(key)) diff --git a/src/cachetools/func.py b/src/cachetools/func.py new file mode 100644 index 0000000..57fb72d --- /dev/null +++ b/src/cachetools/func.py @@ -0,0 +1,176 @@ +"""`functools.lru_cache` compatible memoizing function decorators.""" + +import collections +import functools +import math +import random +import time + +try: + from threading import RLock +except ImportError: # pragma: no cover + from dummy_threading import RLock + +from . 
import keys +from .fifo import FIFOCache +from .lfu import LFUCache +from .lru import LRUCache +from .mru import MRUCache +from .rr import RRCache +from .ttl import TTLCache + +__all__ = ("lfu_cache", "lru_cache", "mru_cache", "rr_cache", "ttl_cache") + + +_CacheInfo = collections.namedtuple( + "CacheInfo", ["hits", "misses", "maxsize", "currsize"] +) + + +class _UnboundCache(dict): + @property + def maxsize(self): + return None + + @property + def currsize(self): + return len(self) + + +class _UnboundTTLCache(TTLCache): + def __init__(self, ttl, timer): + TTLCache.__init__(self, math.inf, ttl, timer) + + @property + def maxsize(self): + return None + + +def _cache(cache, typed): + maxsize = cache.maxsize + + def decorator(func): + key = keys.typedkey if typed else keys.hashkey + lock = RLock() + stats = [0, 0] + + def wrapper(*args, **kwargs): + k = key(*args, **kwargs) + with lock: + try: + v = cache[k] + stats[0] += 1 + return v + except KeyError: + stats[1] += 1 + v = func(*args, **kwargs) + # in case of a race, prefer the item already in the cache + try: + with lock: + return cache.setdefault(k, v) + except ValueError: + return v # value too large + + def cache_info(): + with lock: + hits, misses = stats + maxsize = cache.maxsize + currsize = cache.currsize + return _CacheInfo(hits, misses, maxsize, currsize) + + def cache_clear(): + with lock: + try: + cache.clear() + finally: + stats[:] = [0, 0] + + wrapper.cache_info = cache_info + wrapper.cache_clear = cache_clear + wrapper.cache_parameters = lambda: {"maxsize": maxsize, "typed": typed} + functools.update_wrapper(wrapper, func) + return wrapper + + return decorator + + +def fifo_cache(maxsize=128, typed=False): + """Decorator to wrap a function with a memoizing callable that saves + up to `maxsize` results based on a First In First Out (FIFO) + algorithm. 
+ + """ + if maxsize is None: + return _cache(_UnboundCache(), typed) + elif callable(maxsize): + return _cache(FIFOCache(128), typed)(maxsize) + else: + return _cache(FIFOCache(maxsize), typed) + + +def lfu_cache(maxsize=128, typed=False): + """Decorator to wrap a function with a memoizing callable that saves + up to `maxsize` results based on a Least Frequently Used (LFU) + algorithm. + + """ + if maxsize is None: + return _cache(_UnboundCache(), typed) + elif callable(maxsize): + return _cache(LFUCache(128), typed)(maxsize) + else: + return _cache(LFUCache(maxsize), typed) + + +def lru_cache(maxsize=128, typed=False): + """Decorator to wrap a function with a memoizing callable that saves + up to `maxsize` results based on a Least Recently Used (LRU) + algorithm. + + """ + if maxsize is None: + return _cache(_UnboundCache(), typed) + elif callable(maxsize): + return _cache(LRUCache(128), typed)(maxsize) + else: + return _cache(LRUCache(maxsize), typed) + + +def mru_cache(maxsize=128, typed=False): + """Decorator to wrap a function with a memoizing callable that saves + up to `maxsize` results based on a Most Recently Used (MRU) + algorithm. + """ + if maxsize is None: + return _cache(_UnboundCache(), typed) + elif callable(maxsize): + return _cache(MRUCache(128), typed)(maxsize) + else: + return _cache(MRUCache(maxsize), typed) + + +def rr_cache(maxsize=128, choice=random.choice, typed=False): + """Decorator to wrap a function with a memoizing callable that saves + up to `maxsize` results based on a Random Replacement (RR) + algorithm. 
+ + """ + if maxsize is None: + return _cache(_UnboundCache(), typed) + elif callable(maxsize): + return _cache(RRCache(128, choice), typed)(maxsize) + else: + return _cache(RRCache(maxsize, choice), typed) + + +def ttl_cache(maxsize=128, ttl=600, timer=time.monotonic, typed=False): + """Decorator to wrap a function with a memoizing callable that saves + up to `maxsize` results based on a Least Recently Used (LRU) + algorithm with a per-item time-to-live (TTL) value. + """ + if maxsize is None: + return _cache(_UnboundTTLCache(ttl, timer), typed) + elif callable(maxsize): + return _cache(TTLCache(128, ttl, timer), typed)(maxsize) + else: + return _cache(TTLCache(maxsize, ttl, timer), typed) diff --git a/src/cachetools/keys.py b/src/cachetools/keys.py new file mode 100644 index 0000000..13630a4 --- /dev/null +++ b/src/cachetools/keys.py @@ -0,0 +1,52 @@ +"""Key functions for memoizing decorators.""" + +__all__ = ("hashkey", "typedkey") + + +class _HashedTuple(tuple): + """A tuple that ensures that hash() will be called no more than once + per element, since cache decorators will hash the key multiple + times on a cache miss. See also _HashedSeq in the standard + library functools implementation. 
+ + """ + + __hashvalue = None + + def __hash__(self, hash=tuple.__hash__): + hashvalue = self.__hashvalue + if hashvalue is None: + self.__hashvalue = hashvalue = hash(self) + return hashvalue + + def __add__(self, other, add=tuple.__add__): + return _HashedTuple(add(self, other)) + + def __radd__(self, other, add=tuple.__add__): + return _HashedTuple(add(other, self)) + + def __getstate__(self): + return {} + + +# used for separating keyword arguments; we do not use an object +# instance here so identity is preserved when pickling/unpickling +_kwmark = (_HashedTuple,) + + +def hashkey(*args, **kwargs): + """Return a cache key for the specified hashable arguments.""" + + if kwargs: + return _HashedTuple(args + sum(sorted(kwargs.items()), _kwmark)) + else: + return _HashedTuple(args) + + +def typedkey(*args, **kwargs): + """Return a typed cache key for the specified hashable arguments.""" + + key = hashkey(*args, **kwargs) + key += tuple(type(v) for v in args) + key += tuple(type(v) for _, v in sorted(kwargs.items())) + return key diff --git a/src/cachetools/lfu.py b/src/cachetools/lfu.py new file mode 100644 index 0000000..6289b5c --- /dev/null +++ b/src/cachetools/lfu.py @@ -0,0 +1,34 @@ +import collections + +from .cache import Cache + + +class LFUCache(Cache): + """Least Frequently Used (LFU) cache implementation.""" + + def __init__(self, maxsize, getsizeof=None): + Cache.__init__(self, maxsize, getsizeof) + self.__counter = collections.Counter() + + def __getitem__(self, key, cache_getitem=Cache.__getitem__): + value = cache_getitem(self, key) + if key in self: # __missing__ may not store item + self.__counter[key] -= 1 + return value + + def __setitem__(self, key, value, cache_setitem=Cache.__setitem__): + cache_setitem(self, key, value) + self.__counter[key] -= 1 + + def __delitem__(self, key, cache_delitem=Cache.__delitem__): + cache_delitem(self, key) + del self.__counter[key] + + def popitem(self): + """Remove and return the `(key, value)` pair least 
frequently used.""" + try: + ((key, _),) = self.__counter.most_common(1) + except ValueError: + raise KeyError("%s is empty" % type(self).__name__) from None + else: + return (key, self.pop(key)) diff --git a/src/cachetools/lru.py b/src/cachetools/lru.py new file mode 100644 index 0000000..dbbe787 --- /dev/null +++ b/src/cachetools/lru.py @@ -0,0 +1,40 @@ +import collections + +from .cache import Cache + + +class LRUCache(Cache): + """Least Recently Used (LRU) cache implementation.""" + + def __init__(self, maxsize, getsizeof=None): + Cache.__init__(self, maxsize, getsizeof) + self.__order = collections.OrderedDict() + + def __getitem__(self, key, cache_getitem=Cache.__getitem__): + value = cache_getitem(self, key) + if key in self: # __missing__ may not store item + self.__update(key) + return value + + def __setitem__(self, key, value, cache_setitem=Cache.__setitem__): + cache_setitem(self, key, value) + self.__update(key) + + def __delitem__(self, key, cache_delitem=Cache.__delitem__): + cache_delitem(self, key) + del self.__order[key] + + def popitem(self): + """Remove and return the `(key, value)` pair least recently used.""" + try: + key = next(iter(self.__order)) + except StopIteration: + raise KeyError("%s is empty" % type(self).__name__) from None + else: + return (key, self.pop(key)) + + def __update(self, key): + try: + self.__order.move_to_end(key) + except KeyError: + self.__order[key] = None diff --git a/src/cachetools/mru.py b/src/cachetools/mru.py new file mode 100644 index 0000000..92ec6db --- /dev/null +++ b/src/cachetools/mru.py @@ -0,0 +1,40 @@ +import collections + +from cachetools.cache import Cache + + +class MRUCache(Cache): + """Most Recently Used (MRU) cache implementation.""" + + def __init__(self, maxsize, getsizeof=None): + Cache.__init__(self, maxsize, getsizeof) + self.__order = collections.OrderedDict() + + def __getitem__(self, key, cache_getitem=Cache.__getitem__): + value = cache_getitem(self, key) + if key in self: # __missing__ 
may not store item + self.__update(key) + return value + + def __setitem__(self, key, value, cache_setitem=Cache.__setitem__): + cache_setitem(self, key, value) + self.__update(key) + + def __delitem__(self, key, cache_delitem=Cache.__delitem__): + cache_delitem(self, key) + del self.__order[key] + + def popitem(self): + """Remove and return the `(key, value)` pair most recently used.""" + try: + key = next(iter(self.__order)) + except StopIteration: + raise KeyError("%s is empty" % type(self).__name__) from None + else: + return (key, self.pop(key)) + + def __update(self, key): + try: + self.__order.move_to_end(key, last=False) + except KeyError: + self.__order[key] = None diff --git a/src/cachetools/rr.py b/src/cachetools/rr.py new file mode 100644 index 0000000..561dbe5 --- /dev/null +++ b/src/cachetools/rr.py @@ -0,0 +1,34 @@ +import random + +from .cache import Cache + + +# random.choice cannot be pickled in Python 2.7 +def _choice(seq): + return random.choice(seq) + + +class RRCache(Cache): + """Random Replacement (RR) cache implementation.""" + + def __init__(self, maxsize, choice=random.choice, getsizeof=None): + Cache.__init__(self, maxsize, getsizeof) + # TODO: use None as default, assing to self.choice directly? 
+ if choice is random.choice: + self.__choice = _choice + else: + self.__choice = choice + + @property + def choice(self): + """The `choice` function used by the cache.""" + return self.__choice + + def popitem(self): + """Remove and return a random `(key, value)` pair.""" + try: + key = self.__choice(list(self)) + except IndexError: + raise KeyError("%s is empty" % type(self).__name__) from None + else: + return (key, self.pop(key)) diff --git a/src/cachetools/ttl.py b/src/cachetools/ttl.py new file mode 100644 index 0000000..eef8877 --- /dev/null +++ b/src/cachetools/ttl.py @@ -0,0 +1,207 @@ +import collections +import time + +from .cache import Cache + + +class _Link: + + __slots__ = ("key", "expire", "next", "prev") + + def __init__(self, key=None, expire=None): + self.key = key + self.expire = expire + + def __reduce__(self): + return _Link, (self.key, self.expire) + + def unlink(self): + next = self.next + prev = self.prev + prev.next = next + next.prev = prev + + +class _Timer: + def __init__(self, timer): + self.__timer = timer + self.__nesting = 0 + + def __call__(self): + if self.__nesting == 0: + return self.__timer() + else: + return self.__time + + def __enter__(self): + if self.__nesting == 0: + self.__time = time = self.__timer() + else: + time = self.__time + self.__nesting += 1 + return time + + def __exit__(self, *exc): + self.__nesting -= 1 + + def __reduce__(self): + return _Timer, (self.__timer,) + + def __getattr__(self, name): + return getattr(self.__timer, name) + + +class TTLCache(Cache): + """LRU Cache implementation with per-item time-to-live (TTL) value.""" + + def __init__(self, maxsize, ttl, timer=time.monotonic, getsizeof=None): + Cache.__init__(self, maxsize, getsizeof) + self.__root = root = _Link() + root.prev = root.next = root + self.__links = collections.OrderedDict() + self.__timer = _Timer(timer) + self.__ttl = ttl + + def __contains__(self, key): + try: + link = self.__links[key] # no reordering + except KeyError: + return 
False + else: + return not (link.expire < self.__timer()) + + def __getitem__(self, key, cache_getitem=Cache.__getitem__): + try: + link = self.__getlink(key) + except KeyError: + expired = False + else: + expired = link.expire < self.__timer() + if expired: + return self.__missing__(key) + else: + return cache_getitem(self, key) + + def __setitem__(self, key, value, cache_setitem=Cache.__setitem__): + with self.__timer as time: + self.expire(time) + cache_setitem(self, key, value) + try: + link = self.__getlink(key) + except KeyError: + self.__links[key] = link = _Link(key) + else: + link.unlink() + link.expire = time + self.__ttl + link.next = root = self.__root + link.prev = prev = root.prev + prev.next = root.prev = link + + def __delitem__(self, key, cache_delitem=Cache.__delitem__): + cache_delitem(self, key) + link = self.__links.pop(key) + link.unlink() + if link.expire < self.__timer(): + raise KeyError(key) + + def __iter__(self): + root = self.__root + curr = root.next + while curr is not root: + # "freeze" time for iterator access + with self.__timer as time: + if not (curr.expire < time): + yield curr.key + curr = curr.next + + def __len__(self): + root = self.__root + curr = root.next + time = self.__timer() + count = len(self.__links) + while curr is not root and curr.expire < time: + count -= 1 + curr = curr.next + return count + + def __setstate__(self, state): + self.__dict__.update(state) + root = self.__root + root.prev = root.next = root + for link in sorted(self.__links.values(), key=lambda obj: obj.expire): + link.next = root + link.prev = prev = root.prev + prev.next = root.prev = link + self.expire(self.__timer()) + + def __repr__(self, cache_repr=Cache.__repr__): + with self.__timer as time: + self.expire(time) + return cache_repr(self) + + @property + def currsize(self): + with self.__timer as time: + self.expire(time) + return super().currsize + + @property + def timer(self): + """The timer function used by the cache.""" + return 
self.__timer + + @property + def ttl(self): + """The time-to-live value of the cache's items.""" + return self.__ttl + + def expire(self, time=None): + """Remove expired items from the cache.""" + if time is None: + time = self.__timer() + root = self.__root + curr = root.next + links = self.__links + cache_delitem = Cache.__delitem__ + while curr is not root and curr.expire < time: + cache_delitem(self, curr.key) + del links[curr.key] + next = curr.next + curr.unlink() + curr = next + + def clear(self): + with self.__timer as time: + self.expire(time) + Cache.clear(self) + + def get(self, *args, **kwargs): + with self.__timer: + return Cache.get(self, *args, **kwargs) + + def pop(self, *args, **kwargs): + with self.__timer: + return Cache.pop(self, *args, **kwargs) + + def setdefault(self, *args, **kwargs): + with self.__timer: + return Cache.setdefault(self, *args, **kwargs) + + def popitem(self): + """Remove and return the `(key, value)` pair least recently used that + has not already expired. + + """ + with self.__timer as time: + self.expire(time) + try: + key = next(iter(self.__links)) + except StopIteration: + raise KeyError("%s is empty" % type(self).__name__) from None + else: + return (key, self.pop(key)) + + def __getlink(self, key): + value = self.__links[key] + self.__links.move_to_end(key) + return value -- cgit v1.2.3 From b5a1e5a747aef732828212919921674bd2772b30 Mon Sep 17 00:00:00 2001 From: Thomas Kemmer Date: Tue, 27 Apr 2021 18:11:08 +0200 Subject: Add Travis Python 3.10-dev build. --- .travis.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.travis.yml b/.travis.yml index 06d1552..901e9f0 100644 --- a/.travis.yml +++ b/.travis.yml @@ -6,6 +6,7 @@ python: - 3.7 - 3.8 - 3.9 +- 3.10-dev - pypy3 install: -- cgit v1.2.3 From 743576f94041cf0c4640863a530aeb5061d09bc5 Mon Sep 17 00:00:00 2001 From: Thomas Kemmer Date: Tue, 27 Apr 2021 19:56:36 +0200 Subject: Pin check-manifest version for Python < 3.8 due to Travis issues. 
--- tox.ini | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tox.ini b/tox.ini index e2315db..f2b8671 100644 --- a/tox.ini +++ b/tox.ini @@ -11,7 +11,8 @@ commands = [testenv:check-manifest] deps = - check-manifest + check-manifest==0.44; python_version < "3.8" + check-manifest; python_version >= "3.8" commands = check-manifest skip_install = true -- cgit v1.2.3 From fd9b6712efa3c7f185a43ba844473e7cad4723e3 Mon Sep 17 00:00:00 2001 From: Thomas Kemmer Date: Tue, 27 Apr 2021 23:11:33 +0200 Subject: Remove Libraries.io SourceRank. --- README.rst | 4 ---- 1 file changed, 4 deletions(-) diff --git a/README.rst b/README.rst index 5370b34..1ddb8a2 100644 --- a/README.rst +++ b/README.rst @@ -17,10 +17,6 @@ cachetools :target: https://coveralls.io/r/tkem/cachetools :alt: Test coverage -.. image:: https://img.shields.io/librariesio/sourcerank/pypi/cachetools - :target: https://libraries.io/pypi/cachetools - :alt: Libraries.io SourceRank - .. image:: https://img.shields.io/github/license/tkem/cachetools :target: https://raw.github.com/tkem/cachetools/master/LICENSE :alt: License -- cgit v1.2.3 From e919f2fbfca1c404a9d9e313ddc0a1f4fe119f76 Mon Sep 17 00:00:00 2001 From: Thomas Kemmer Date: Tue, 27 Apr 2021 23:11:46 +0200 Subject: Release v4.2.2. --- CHANGELOG.rst | 10 ++++++++++ src/cachetools/__init__.py | 2 +- 2 files changed, 11 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 4bfad6b..cf4986e 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -1,3 +1,13 @@ +v4.2.2 (2021-04-27) +=================== + +- Update build environment. + +- Remove Python 2 remnants. + +- Format code with Black. 
+ + v4.2.1 (2021-01-24) =================== diff --git a/src/cachetools/__init__.py b/src/cachetools/__init__.py index 1c925cc..c9d37e2 100644 --- a/src/cachetools/__init__.py +++ b/src/cachetools/__init__.py @@ -21,4 +21,4 @@ __all__ = ( "cachedmethod", ) -__version__ = "4.2.1" +__version__ = "4.2.2" -- cgit v1.2.3 From fc477611191dcd85a277244cef7c17f909d3dd15 Mon Sep 17 00:00:00 2001 From: Thomas Kemmer Date: Mon, 9 Aug 2021 21:57:40 +0200 Subject: Move CI to GitHub Actions, coverage to Codecov. --- .github/workflows/ci.yml | 22 ++++++++++++++++++++++ .travis.yml | 19 ------------------- README.rst | 10 +++++----- tox.ini | 3 +-- 4 files changed, 28 insertions(+), 26 deletions(-) create mode 100644 .github/workflows/ci.yml delete mode 100644 .travis.yml diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml new file mode 100644 index 0000000..b0a9160 --- /dev/null +++ b/.github/workflows/ci.yml @@ -0,0 +1,22 @@ +name: CI + +on: [push, pull_request, workflow_dispatch] + +jobs: + main: + name: Python ${{ matrix.python }} + runs-on: ubuntu-20.04 + strategy: + fail-fast: false + matrix: + python: ["3.6", "3.7", "3.8", "3.9", "pypy-3.6", "pypy-3.7"] + steps: + - uses: actions/checkout@v2 + - uses: actions/setup-python@v2 + with: + python-version: ${{ matrix.python }} + - run: python -m pip install coverage tox + - run: python -m tox + - uses: codecov/codecov-action@v1 + with: + name: ${{ matrix.python }} diff --git a/.travis.yml b/.travis.yml deleted file mode 100644 index 901e9f0..0000000 --- a/.travis.yml +++ /dev/null @@ -1,19 +0,0 @@ -language: python - -python: -- 3.5 -- 3.6 -- 3.7 -- 3.8 -- 3.9 -- 3.10-dev -- pypy3 - -install: -- pip install coveralls tox - -script: -- tox - -after_success: -- coveralls diff --git a/README.rst b/README.rst index 1ddb8a2..106d64a 100644 --- a/README.rst +++ b/README.rst @@ -9,12 +9,12 @@ cachetools :target: https://cachetools.readthedocs.io/ :alt: Documentation build status -.. 
image:: https://img.shields.io/travis/tkem/cachetools - :target: https://travis-ci.org/tkem/cachetools/ - :alt: Travis CI build status +.. image:: https://img.shields.io/github/workflow/status/tkem/cachetools/CI + :target: https://github.com/tkem/cachetools/actions + :alt: CI build status -.. image:: https://img.shields.io/coveralls/tkem/cachetools - :target: https://coveralls.io/r/tkem/cachetools +.. image:: https://img.shields.io/codecov/c/github/tkem/cachetools/master.svg + :target: https://codecov.io/gh/tkem/cachetools :alt: Test coverage .. image:: https://img.shields.io/github/license/tkem/cachetools diff --git a/tox.ini b/tox.ini index f2b8671..b36d00c 100644 --- a/tox.ini +++ b/tox.ini @@ -3,7 +3,6 @@ envlist = check-manifest,docs,doctest,flake8,py [testenv] deps = - coverage pytest pytest-cov commands = @@ -32,7 +31,7 @@ commands = [testenv:flake8] deps = flake8 - flake8-black; python_version >= "3.6" and implementation_name == "cpython" + flake8-black; implementation_name == "cpython" flake8-bugbear flake8-import-order commands = -- cgit v1.2.3 From 40d2710e1cd9bedbfee0bf9a1490349e15271e33 Mon Sep 17 00:00:00 2001 From: Thomas Kemmer Date: Mon, 9 Aug 2021 22:11:55 +0200 Subject: Fix CI target URL. --- README.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.rst b/README.rst index 106d64a..ed962f8 100644 --- a/README.rst +++ b/README.rst @@ -10,7 +10,7 @@ cachetools :alt: Documentation build status .. image:: https://img.shields.io/github/workflow/status/tkem/cachetools/CI - :target: https://github.com/tkem/cachetools/actions + :target: https://github.com/tkem/cachetools/actions/workflows/ci.yml :alt: CI build status .. image:: https://img.shields.io/codecov/c/github/tkem/cachetools/master.svg -- cgit v1.2.3 From be507a6234ac6f48ed84052a414e38dfb22aaa8a Mon Sep 17 00:00:00 2001 From: Thomas Kemmer Date: Mon, 27 Sep 2021 21:02:20 +0200 Subject: Fix #178: Flatten package file hierarchy. 
--- src/cachetools/__init__.py | 590 ++++++++++++++++++++++++++++++++++++++++++- src/cachetools/cache.py | 117 --------- src/cachetools/decorators.py | 102 -------- src/cachetools/fifo.py | 31 --- src/cachetools/func.py | 11 +- src/cachetools/lfu.py | 34 --- src/cachetools/lru.py | 40 --- src/cachetools/mru.py | 40 --- src/cachetools/rr.py | 34 --- src/cachetools/ttl.py | 207 --------------- 10 files changed, 584 insertions(+), 622 deletions(-) delete mode 100644 src/cachetools/cache.py delete mode 100644 src/cachetools/decorators.py delete mode 100644 src/cachetools/fifo.py delete mode 100644 src/cachetools/lfu.py delete mode 100644 src/cachetools/lru.py delete mode 100644 src/cachetools/mru.py delete mode 100644 src/cachetools/rr.py delete mode 100644 src/cachetools/ttl.py diff --git a/src/cachetools/__init__.py b/src/cachetools/__init__.py index c9d37e2..0e5546e 100644 --- a/src/cachetools/__init__.py +++ b/src/cachetools/__init__.py @@ -1,14 +1,5 @@ """Extensible memoizing collections and decorators.""" -from .cache import Cache -from .decorators import cached, cachedmethod -from .fifo import FIFOCache -from .lfu import LFUCache -from .lru import LRUCache -from .mru import MRUCache -from .rr import RRCache -from .ttl import TTLCache - __all__ = ( "Cache", "FIFOCache", @@ -22,3 +13,584 @@ __all__ = ( ) __version__ = "4.2.2" + +import collections +import collections.abc +import functools +import random +import time + +from .keys import hashkey + + +class _DefaultSize: + + __slots__ = () + + def __getitem__(self, _): + return 1 + + def __setitem__(self, _, value): + assert value == 1 + + def pop(self, _): + return 1 + + +class Cache(collections.abc.MutableMapping): + """Mutable mapping to serve as a simple cache or cache base class.""" + + __marker = object() + + __size = _DefaultSize() + + def __init__(self, maxsize, getsizeof=None): + if getsizeof: + self.getsizeof = getsizeof + if self.getsizeof is not Cache.getsizeof: + self.__size = dict() + self.__data = 
dict() + self.__currsize = 0 + self.__maxsize = maxsize + + def __repr__(self): + return "%s(%r, maxsize=%r, currsize=%r)" % ( + self.__class__.__name__, + list(self.__data.items()), + self.__maxsize, + self.__currsize, + ) + + def __getitem__(self, key): + try: + return self.__data[key] + except KeyError: + return self.__missing__(key) + + def __setitem__(self, key, value): + maxsize = self.__maxsize + size = self.getsizeof(value) + if size > maxsize: + raise ValueError("value too large") + if key not in self.__data or self.__size[key] < size: + while self.__currsize + size > maxsize: + self.popitem() + if key in self.__data: + diffsize = size - self.__size[key] + else: + diffsize = size + self.__data[key] = value + self.__size[key] = size + self.__currsize += diffsize + + def __delitem__(self, key): + size = self.__size.pop(key) + del self.__data[key] + self.__currsize -= size + + def __contains__(self, key): + return key in self.__data + + def __missing__(self, key): + raise KeyError(key) + + def __iter__(self): + return iter(self.__data) + + def __len__(self): + return len(self.__data) + + def get(self, key, default=None): + if key in self: + return self[key] + else: + return default + + def pop(self, key, default=__marker): + if key in self: + value = self[key] + del self[key] + elif default is self.__marker: + raise KeyError(key) + else: + value = default + return value + + def setdefault(self, key, default=None): + if key in self: + value = self[key] + else: + self[key] = value = default + return value + + @property + def maxsize(self): + """The maximum size of the cache.""" + return self.__maxsize + + @property + def currsize(self): + """The current size of the cache.""" + return self.__currsize + + @staticmethod + def getsizeof(value): + """Return the size of a cache element's value.""" + return 1 + + +class FIFOCache(Cache): + """First In First Out (FIFO) cache implementation.""" + + def __init__(self, maxsize, getsizeof=None): + Cache.__init__(self, 
maxsize, getsizeof) + self.__order = collections.OrderedDict() + + def __setitem__(self, key, value, cache_setitem=Cache.__setitem__): + cache_setitem(self, key, value) + try: + self.__order.move_to_end(key) + except KeyError: + self.__order[key] = None + + def __delitem__(self, key, cache_delitem=Cache.__delitem__): + cache_delitem(self, key) + del self.__order[key] + + def popitem(self): + """Remove and return the `(key, value)` pair first inserted.""" + try: + key = next(iter(self.__order)) + except StopIteration: + raise KeyError("%s is empty" % type(self).__name__) from None + else: + return (key, self.pop(key)) + + +class LFUCache(Cache): + """Least Frequently Used (LFU) cache implementation.""" + + def __init__(self, maxsize, getsizeof=None): + Cache.__init__(self, maxsize, getsizeof) + self.__counter = collections.Counter() + + def __getitem__(self, key, cache_getitem=Cache.__getitem__): + value = cache_getitem(self, key) + if key in self: # __missing__ may not store item + self.__counter[key] -= 1 + return value + + def __setitem__(self, key, value, cache_setitem=Cache.__setitem__): + cache_setitem(self, key, value) + self.__counter[key] -= 1 + + def __delitem__(self, key, cache_delitem=Cache.__delitem__): + cache_delitem(self, key) + del self.__counter[key] + + def popitem(self): + """Remove and return the `(key, value)` pair least frequently used.""" + try: + ((key, _),) = self.__counter.most_common(1) + except ValueError: + raise KeyError("%s is empty" % type(self).__name__) from None + else: + return (key, self.pop(key)) + + +class LRUCache(Cache): + """Least Recently Used (LRU) cache implementation.""" + + def __init__(self, maxsize, getsizeof=None): + Cache.__init__(self, maxsize, getsizeof) + self.__order = collections.OrderedDict() + + def __getitem__(self, key, cache_getitem=Cache.__getitem__): + value = cache_getitem(self, key) + if key in self: # __missing__ may not store item + self.__update(key) + return value + + def __setitem__(self, key, 
value, cache_setitem=Cache.__setitem__): + cache_setitem(self, key, value) + self.__update(key) + + def __delitem__(self, key, cache_delitem=Cache.__delitem__): + cache_delitem(self, key) + del self.__order[key] + + def popitem(self): + """Remove and return the `(key, value)` pair least recently used.""" + try: + key = next(iter(self.__order)) + except StopIteration: + raise KeyError("%s is empty" % type(self).__name__) from None + else: + return (key, self.pop(key)) + + def __update(self, key): + try: + self.__order.move_to_end(key) + except KeyError: + self.__order[key] = None + + +class MRUCache(Cache): + """Most Recently Used (MRU) cache implementation.""" + + def __init__(self, maxsize, getsizeof=None): + Cache.__init__(self, maxsize, getsizeof) + self.__order = collections.OrderedDict() + + def __getitem__(self, key, cache_getitem=Cache.__getitem__): + value = cache_getitem(self, key) + if key in self: # __missing__ may not store item + self.__update(key) + return value + + def __setitem__(self, key, value, cache_setitem=Cache.__setitem__): + cache_setitem(self, key, value) + self.__update(key) + + def __delitem__(self, key, cache_delitem=Cache.__delitem__): + cache_delitem(self, key) + del self.__order[key] + + def popitem(self): + """Remove and return the `(key, value)` pair most recently used.""" + try: + key = next(iter(self.__order)) + except StopIteration: + raise KeyError("%s is empty" % type(self).__name__) from None + else: + return (key, self.pop(key)) + + def __update(self, key): + try: + self.__order.move_to_end(key, last=False) + except KeyError: + self.__order[key] = None + + +class RRCache(Cache): + """Random Replacement (RR) cache implementation.""" + + def __init__(self, maxsize, choice=random.choice, getsizeof=None): + Cache.__init__(self, maxsize, getsizeof) + self.__choice = choice + + @property + def choice(self): + """The `choice` function used by the cache.""" + return self.__choice + + def popitem(self): + """Remove and return a random 
`(key, value)` pair.""" + try: + key = self.__choice(list(self)) + except IndexError: + raise KeyError("%s is empty" % type(self).__name__) from None + else: + return (key, self.pop(key)) + + +class _Timer: + def __init__(self, timer): + self.__timer = timer + self.__nesting = 0 + + def __call__(self): + if self.__nesting == 0: + return self.__timer() + else: + return self.__time + + def __enter__(self): + if self.__nesting == 0: + self.__time = time = self.__timer() + else: + time = self.__time + self.__nesting += 1 + return time + + def __exit__(self, *exc): + self.__nesting -= 1 + + def __reduce__(self): + return _Timer, (self.__timer,) + + def __getattr__(self, name): + return getattr(self.__timer, name) + + +class _Link: + + __slots__ = ("key", "expire", "next", "prev") + + def __init__(self, key=None, expire=None): + self.key = key + self.expire = expire + + def __reduce__(self): + return _Link, (self.key, self.expire) + + def unlink(self): + next = self.next + prev = self.prev + prev.next = next + next.prev = prev + + +class TTLCache(Cache): + """LRU Cache implementation with per-item time-to-live (TTL) value.""" + + def __init__(self, maxsize, ttl, timer=time.monotonic, getsizeof=None): + Cache.__init__(self, maxsize, getsizeof) + self.__root = root = _Link() + root.prev = root.next = root + self.__links = collections.OrderedDict() + self.__timer = _Timer(timer) + self.__ttl = ttl + + def __contains__(self, key): + try: + link = self.__links[key] # no reordering + except KeyError: + return False + else: + return not (link.expire < self.__timer()) + + def __getitem__(self, key, cache_getitem=Cache.__getitem__): + try: + link = self.__getlink(key) + except KeyError: + expired = False + else: + expired = link.expire < self.__timer() + if expired: + return self.__missing__(key) + else: + return cache_getitem(self, key) + + def __setitem__(self, key, value, cache_setitem=Cache.__setitem__): + with self.__timer as time: + self.expire(time) + cache_setitem(self, 
key, value) + try: + link = self.__getlink(key) + except KeyError: + self.__links[key] = link = _Link(key) + else: + link.unlink() + link.expire = time + self.__ttl + link.next = root = self.__root + link.prev = prev = root.prev + prev.next = root.prev = link + + def __delitem__(self, key, cache_delitem=Cache.__delitem__): + cache_delitem(self, key) + link = self.__links.pop(key) + link.unlink() + if link.expire < self.__timer(): + raise KeyError(key) + + def __iter__(self): + root = self.__root + curr = root.next + while curr is not root: + # "freeze" time for iterator access + with self.__timer as time: + if not (curr.expire < time): + yield curr.key + curr = curr.next + + def __len__(self): + root = self.__root + curr = root.next + time = self.__timer() + count = len(self.__links) + while curr is not root and curr.expire < time: + count -= 1 + curr = curr.next + return count + + def __setstate__(self, state): + self.__dict__.update(state) + root = self.__root + root.prev = root.next = root + for link in sorted(self.__links.values(), key=lambda obj: obj.expire): + link.next = root + link.prev = prev = root.prev + prev.next = root.prev = link + self.expire(self.__timer()) + + def __repr__(self, cache_repr=Cache.__repr__): + with self.__timer as time: + self.expire(time) + return cache_repr(self) + + @property + def currsize(self): + with self.__timer as time: + self.expire(time) + return super().currsize + + @property + def timer(self): + """The timer function used by the cache.""" + return self.__timer + + @property + def ttl(self): + """The time-to-live value of the cache's items.""" + return self.__ttl + + def expire(self, time=None): + """Remove expired items from the cache.""" + if time is None: + time = self.__timer() + root = self.__root + curr = root.next + links = self.__links + cache_delitem = Cache.__delitem__ + while curr is not root and curr.expire < time: + cache_delitem(self, curr.key) + del links[curr.key] + next = curr.next + curr.unlink() + curr 
= next + + def clear(self): + with self.__timer as time: + self.expire(time) + Cache.clear(self) + + def get(self, *args, **kwargs): + with self.__timer: + return Cache.get(self, *args, **kwargs) + + def pop(self, *args, **kwargs): + with self.__timer: + return Cache.pop(self, *args, **kwargs) + + def setdefault(self, *args, **kwargs): + with self.__timer: + return Cache.setdefault(self, *args, **kwargs) + + def popitem(self): + """Remove and return the `(key, value)` pair least recently used that + has not already expired. + + """ + with self.__timer as time: + self.expire(time) + try: + key = next(iter(self.__links)) + except StopIteration: + raise KeyError("%s is empty" % type(self).__name__) from None + else: + return (key, self.pop(key)) + + def __getlink(self, key): + value = self.__links[key] + self.__links.move_to_end(key) + return value + + +def cached(cache, key=hashkey, lock=None): + """Decorator to wrap a function with a memoizing callable that saves + results in a cache. + + """ + + def decorator(func): + if cache is None: + + def wrapper(*args, **kwargs): + return func(*args, **kwargs) + + elif lock is None: + + def wrapper(*args, **kwargs): + k = key(*args, **kwargs) + try: + return cache[k] + except KeyError: + pass # key not found + v = func(*args, **kwargs) + try: + cache[k] = v + except ValueError: + pass # value too large + return v + + else: + + def wrapper(*args, **kwargs): + k = key(*args, **kwargs) + try: + with lock: + return cache[k] + except KeyError: + pass # key not found + v = func(*args, **kwargs) + # in case of a race, prefer the item already in the cache + try: + with lock: + return cache.setdefault(k, v) + except ValueError: + return v # value too large + + return functools.update_wrapper(wrapper, func) + + return decorator + + +def cachedmethod(cache, key=hashkey, lock=None): + """Decorator to wrap a class or instance method with a memoizing + callable that saves results in a cache. 
+ + """ + + def decorator(method): + if lock is None: + + def wrapper(self, *args, **kwargs): + c = cache(self) + if c is None: + return method(self, *args, **kwargs) + k = key(*args, **kwargs) + try: + return c[k] + except KeyError: + pass # key not found + v = method(self, *args, **kwargs) + try: + c[k] = v + except ValueError: + pass # value too large + return v + + else: + + def wrapper(self, *args, **kwargs): + c = cache(self) + if c is None: + return method(self, *args, **kwargs) + k = key(*args, **kwargs) + try: + with lock(self): + return c[k] + except KeyError: + pass # key not found + v = method(self, *args, **kwargs) + # in case of a race, prefer the item already in the cache + try: + with lock(self): + return c.setdefault(k, v) + except ValueError: + return v # value too large + + return functools.update_wrapper(wrapper, method) + + return decorator diff --git a/src/cachetools/cache.py b/src/cachetools/cache.py deleted file mode 100644 index 973d50b..0000000 --- a/src/cachetools/cache.py +++ /dev/null @@ -1,117 +0,0 @@ -from collections.abc import MutableMapping - - -class _DefaultSize: - - __slots__ = () - - def __getitem__(self, _): - return 1 - - def __setitem__(self, _, value): - assert value == 1 - - def pop(self, _): - return 1 - - -class Cache(MutableMapping): - """Mutable mapping to serve as a simple cache or cache base class.""" - - __marker = object() - - __size = _DefaultSize() - - def __init__(self, maxsize, getsizeof=None): - if getsizeof: - self.getsizeof = getsizeof - if self.getsizeof is not Cache.getsizeof: - self.__size = dict() - self.__data = dict() - self.__currsize = 0 - self.__maxsize = maxsize - - def __repr__(self): - return "%s(%r, maxsize=%r, currsize=%r)" % ( - self.__class__.__name__, - list(self.__data.items()), - self.__maxsize, - self.__currsize, - ) - - def __getitem__(self, key): - try: - return self.__data[key] - except KeyError: - return self.__missing__(key) - - def __setitem__(self, key, value): - maxsize = 
self.__maxsize - size = self.getsizeof(value) - if size > maxsize: - raise ValueError("value too large") - if key not in self.__data or self.__size[key] < size: - while self.__currsize + size > maxsize: - self.popitem() - if key in self.__data: - diffsize = size - self.__size[key] - else: - diffsize = size - self.__data[key] = value - self.__size[key] = size - self.__currsize += diffsize - - def __delitem__(self, key): - size = self.__size.pop(key) - del self.__data[key] - self.__currsize -= size - - def __contains__(self, key): - return key in self.__data - - def __missing__(self, key): - raise KeyError(key) - - def __iter__(self): - return iter(self.__data) - - def __len__(self): - return len(self.__data) - - def get(self, key, default=None): - if key in self: - return self[key] - else: - return default - - def pop(self, key, default=__marker): - if key in self: - value = self[key] - del self[key] - elif default is self.__marker: - raise KeyError(key) - else: - value = default - return value - - def setdefault(self, key, default=None): - if key in self: - value = self[key] - else: - self[key] = value = default - return value - - @property - def maxsize(self): - """The maximum size of the cache.""" - return self.__maxsize - - @property - def currsize(self): - """The current size of the cache.""" - return self.__currsize - - @staticmethod - def getsizeof(value): - """Return the size of a cache element's value.""" - return 1 diff --git a/src/cachetools/decorators.py b/src/cachetools/decorators.py deleted file mode 100644 index 3e78603..0000000 --- a/src/cachetools/decorators.py +++ /dev/null @@ -1,102 +0,0 @@ -import functools - -from .keys import hashkey - - -def cached(cache, key=hashkey, lock=None): - """Decorator to wrap a function with a memoizing callable that saves - results in a cache. 
- - """ - - def decorator(func): - if cache is None: - - def wrapper(*args, **kwargs): - return func(*args, **kwargs) - - elif lock is None: - - def wrapper(*args, **kwargs): - k = key(*args, **kwargs) - try: - return cache[k] - except KeyError: - pass # key not found - v = func(*args, **kwargs) - try: - cache[k] = v - except ValueError: - pass # value too large - return v - - else: - - def wrapper(*args, **kwargs): - k = key(*args, **kwargs) - try: - with lock: - return cache[k] - except KeyError: - pass # key not found - v = func(*args, **kwargs) - # in case of a race, prefer the item already in the cache - try: - with lock: - return cache.setdefault(k, v) - except ValueError: - return v # value too large - - return functools.update_wrapper(wrapper, func) - - return decorator - - -def cachedmethod(cache, key=hashkey, lock=None): - """Decorator to wrap a class or instance method with a memoizing - callable that saves results in a cache. - - """ - - def decorator(method): - if lock is None: - - def wrapper(self, *args, **kwargs): - c = cache(self) - if c is None: - return method(self, *args, **kwargs) - k = key(*args, **kwargs) - try: - return c[k] - except KeyError: - pass # key not found - v = method(self, *args, **kwargs) - try: - c[k] = v - except ValueError: - pass # value too large - return v - - else: - - def wrapper(self, *args, **kwargs): - c = cache(self) - if c is None: - return method(self, *args, **kwargs) - k = key(*args, **kwargs) - try: - with lock(self): - return c[k] - except KeyError: - pass # key not found - v = method(self, *args, **kwargs) - # in case of a race, prefer the item already in the cache - try: - with lock(self): - return c.setdefault(k, v) - except ValueError: - return v # value too large - - return functools.update_wrapper(wrapper, method) - - return decorator diff --git a/src/cachetools/fifo.py b/src/cachetools/fifo.py deleted file mode 100644 index e7c377e..0000000 --- a/src/cachetools/fifo.py +++ /dev/null @@ -1,31 +0,0 @@ 
-import collections - -from .cache import Cache - - -class FIFOCache(Cache): - """First In First Out (FIFO) cache implementation.""" - - def __init__(self, maxsize, getsizeof=None): - Cache.__init__(self, maxsize, getsizeof) - self.__order = collections.OrderedDict() - - def __setitem__(self, key, value, cache_setitem=Cache.__setitem__): - cache_setitem(self, key, value) - try: - self.__order.move_to_end(key) - except KeyError: - self.__order[key] = None - - def __delitem__(self, key, cache_delitem=Cache.__delitem__): - cache_delitem(self, key) - del self.__order[key] - - def popitem(self): - """Remove and return the `(key, value)` pair first inserted.""" - try: - key = next(iter(self.__order)) - except StopIteration: - raise KeyError("%s is empty" % type(self).__name__) from None - else: - return (key, self.pop(key)) diff --git a/src/cachetools/func.py b/src/cachetools/func.py index 57fb72d..01702c2 100644 --- a/src/cachetools/func.py +++ b/src/cachetools/func.py @@ -1,5 +1,7 @@ """`functools.lru_cache` compatible memoizing function decorators.""" +__all__ = ("fifo_cache", "lfu_cache", "lru_cache", "mru_cache", "rr_cache", "ttl_cache") + import collections import functools import math @@ -11,15 +13,8 @@ try: except ImportError: # pragma: no cover from dummy_threading import RLock +from . import FIFOCache, LFUCache, LRUCache, MRUCache, RRCache, TTLCache from . 
import keys -from .fifo import FIFOCache -from .lfu import LFUCache -from .lru import LRUCache -from .mru import MRUCache -from .rr import RRCache -from .ttl import TTLCache - -__all__ = ("lfu_cache", "lru_cache", "mru_cache", "rr_cache", "ttl_cache") _CacheInfo = collections.namedtuple( diff --git a/src/cachetools/lfu.py b/src/cachetools/lfu.py deleted file mode 100644 index 6289b5c..0000000 --- a/src/cachetools/lfu.py +++ /dev/null @@ -1,34 +0,0 @@ -import collections - -from .cache import Cache - - -class LFUCache(Cache): - """Least Frequently Used (LFU) cache implementation.""" - - def __init__(self, maxsize, getsizeof=None): - Cache.__init__(self, maxsize, getsizeof) - self.__counter = collections.Counter() - - def __getitem__(self, key, cache_getitem=Cache.__getitem__): - value = cache_getitem(self, key) - if key in self: # __missing__ may not store item - self.__counter[key] -= 1 - return value - - def __setitem__(self, key, value, cache_setitem=Cache.__setitem__): - cache_setitem(self, key, value) - self.__counter[key] -= 1 - - def __delitem__(self, key, cache_delitem=Cache.__delitem__): - cache_delitem(self, key) - del self.__counter[key] - - def popitem(self): - """Remove and return the `(key, value)` pair least frequently used.""" - try: - ((key, _),) = self.__counter.most_common(1) - except ValueError: - raise KeyError("%s is empty" % type(self).__name__) from None - else: - return (key, self.pop(key)) diff --git a/src/cachetools/lru.py b/src/cachetools/lru.py deleted file mode 100644 index dbbe787..0000000 --- a/src/cachetools/lru.py +++ /dev/null @@ -1,40 +0,0 @@ -import collections - -from .cache import Cache - - -class LRUCache(Cache): - """Least Recently Used (LRU) cache implementation.""" - - def __init__(self, maxsize, getsizeof=None): - Cache.__init__(self, maxsize, getsizeof) - self.__order = collections.OrderedDict() - - def __getitem__(self, key, cache_getitem=Cache.__getitem__): - value = cache_getitem(self, key) - if key in self: # 
__missing__ may not store item - self.__update(key) - return value - - def __setitem__(self, key, value, cache_setitem=Cache.__setitem__): - cache_setitem(self, key, value) - self.__update(key) - - def __delitem__(self, key, cache_delitem=Cache.__delitem__): - cache_delitem(self, key) - del self.__order[key] - - def popitem(self): - """Remove and return the `(key, value)` pair least recently used.""" - try: - key = next(iter(self.__order)) - except StopIteration: - raise KeyError("%s is empty" % type(self).__name__) from None - else: - return (key, self.pop(key)) - - def __update(self, key): - try: - self.__order.move_to_end(key) - except KeyError: - self.__order[key] = None diff --git a/src/cachetools/mru.py b/src/cachetools/mru.py deleted file mode 100644 index 92ec6db..0000000 --- a/src/cachetools/mru.py +++ /dev/null @@ -1,40 +0,0 @@ -import collections - -from cachetools.cache import Cache - - -class MRUCache(Cache): - """Most Recently Used (MRU) cache implementation.""" - - def __init__(self, maxsize, getsizeof=None): - Cache.__init__(self, maxsize, getsizeof) - self.__order = collections.OrderedDict() - - def __getitem__(self, key, cache_getitem=Cache.__getitem__): - value = cache_getitem(self, key) - if key in self: # __missing__ may not store item - self.__update(key) - return value - - def __setitem__(self, key, value, cache_setitem=Cache.__setitem__): - cache_setitem(self, key, value) - self.__update(key) - - def __delitem__(self, key, cache_delitem=Cache.__delitem__): - cache_delitem(self, key) - del self.__order[key] - - def popitem(self): - """Remove and return the `(key, value)` pair most recently used.""" - try: - key = next(iter(self.__order)) - except StopIteration: - raise KeyError("%s is empty" % type(self).__name__) from None - else: - return (key, self.pop(key)) - - def __update(self, key): - try: - self.__order.move_to_end(key, last=False) - except KeyError: - self.__order[key] = None diff --git a/src/cachetools/rr.py b/src/cachetools/rr.py 
deleted file mode 100644 index 561dbe5..0000000 --- a/src/cachetools/rr.py +++ /dev/null @@ -1,34 +0,0 @@ -import random - -from .cache import Cache - - -# random.choice cannot be pickled in Python 2.7 -def _choice(seq): - return random.choice(seq) - - -class RRCache(Cache): - """Random Replacement (RR) cache implementation.""" - - def __init__(self, maxsize, choice=random.choice, getsizeof=None): - Cache.__init__(self, maxsize, getsizeof) - # TODO: use None as default, assing to self.choice directly? - if choice is random.choice: - self.__choice = _choice - else: - self.__choice = choice - - @property - def choice(self): - """The `choice` function used by the cache.""" - return self.__choice - - def popitem(self): - """Remove and return a random `(key, value)` pair.""" - try: - key = self.__choice(list(self)) - except IndexError: - raise KeyError("%s is empty" % type(self).__name__) from None - else: - return (key, self.pop(key)) diff --git a/src/cachetools/ttl.py b/src/cachetools/ttl.py deleted file mode 100644 index eef8877..0000000 --- a/src/cachetools/ttl.py +++ /dev/null @@ -1,207 +0,0 @@ -import collections -import time - -from .cache import Cache - - -class _Link: - - __slots__ = ("key", "expire", "next", "prev") - - def __init__(self, key=None, expire=None): - self.key = key - self.expire = expire - - def __reduce__(self): - return _Link, (self.key, self.expire) - - def unlink(self): - next = self.next - prev = self.prev - prev.next = next - next.prev = prev - - -class _Timer: - def __init__(self, timer): - self.__timer = timer - self.__nesting = 0 - - def __call__(self): - if self.__nesting == 0: - return self.__timer() - else: - return self.__time - - def __enter__(self): - if self.__nesting == 0: - self.__time = time = self.__timer() - else: - time = self.__time - self.__nesting += 1 - return time - - def __exit__(self, *exc): - self.__nesting -= 1 - - def __reduce__(self): - return _Timer, (self.__timer,) - - def __getattr__(self, name): - return 
getattr(self.__timer, name) - - -class TTLCache(Cache): - """LRU Cache implementation with per-item time-to-live (TTL) value.""" - - def __init__(self, maxsize, ttl, timer=time.monotonic, getsizeof=None): - Cache.__init__(self, maxsize, getsizeof) - self.__root = root = _Link() - root.prev = root.next = root - self.__links = collections.OrderedDict() - self.__timer = _Timer(timer) - self.__ttl = ttl - - def __contains__(self, key): - try: - link = self.__links[key] # no reordering - except KeyError: - return False - else: - return not (link.expire < self.__timer()) - - def __getitem__(self, key, cache_getitem=Cache.__getitem__): - try: - link = self.__getlink(key) - except KeyError: - expired = False - else: - expired = link.expire < self.__timer() - if expired: - return self.__missing__(key) - else: - return cache_getitem(self, key) - - def __setitem__(self, key, value, cache_setitem=Cache.__setitem__): - with self.__timer as time: - self.expire(time) - cache_setitem(self, key, value) - try: - link = self.__getlink(key) - except KeyError: - self.__links[key] = link = _Link(key) - else: - link.unlink() - link.expire = time + self.__ttl - link.next = root = self.__root - link.prev = prev = root.prev - prev.next = root.prev = link - - def __delitem__(self, key, cache_delitem=Cache.__delitem__): - cache_delitem(self, key) - link = self.__links.pop(key) - link.unlink() - if link.expire < self.__timer(): - raise KeyError(key) - - def __iter__(self): - root = self.__root - curr = root.next - while curr is not root: - # "freeze" time for iterator access - with self.__timer as time: - if not (curr.expire < time): - yield curr.key - curr = curr.next - - def __len__(self): - root = self.__root - curr = root.next - time = self.__timer() - count = len(self.__links) - while curr is not root and curr.expire < time: - count -= 1 - curr = curr.next - return count - - def __setstate__(self, state): - self.__dict__.update(state) - root = self.__root - root.prev = root.next = root - 
for link in sorted(self.__links.values(), key=lambda obj: obj.expire): - link.next = root - link.prev = prev = root.prev - prev.next = root.prev = link - self.expire(self.__timer()) - - def __repr__(self, cache_repr=Cache.__repr__): - with self.__timer as time: - self.expire(time) - return cache_repr(self) - - @property - def currsize(self): - with self.__timer as time: - self.expire(time) - return super().currsize - - @property - def timer(self): - """The timer function used by the cache.""" - return self.__timer - - @property - def ttl(self): - """The time-to-live value of the cache's items.""" - return self.__ttl - - def expire(self, time=None): - """Remove expired items from the cache.""" - if time is None: - time = self.__timer() - root = self.__root - curr = root.next - links = self.__links - cache_delitem = Cache.__delitem__ - while curr is not root and curr.expire < time: - cache_delitem(self, curr.key) - del links[curr.key] - next = curr.next - curr.unlink() - curr = next - - def clear(self): - with self.__timer as time: - self.expire(time) - Cache.clear(self) - - def get(self, *args, **kwargs): - with self.__timer: - return Cache.get(self, *args, **kwargs) - - def pop(self, *args, **kwargs): - with self.__timer: - return Cache.pop(self, *args, **kwargs) - - def setdefault(self, *args, **kwargs): - with self.__timer: - return Cache.setdefault(self, *args, **kwargs) - - def popitem(self): - """Remove and return the `(key, value)` pair least recently used that - has not already expired. 
- - """ - with self.__timer as time: - self.expire(time) - try: - key = next(iter(self.__links)) - except StopIteration: - raise KeyError("%s is empty" % type(self).__name__) from None - else: - return (key, self.pop(key)) - - def __getlink(self, key): - value = self.__links[key] - self.__links.move_to_end(key) - return value -- cgit v1.2.3 From 7ae6ef5749db9138a555edf58d6b01b6778cf6ad Mon Sep 17 00:00:00 2001 From: Thomas Kemmer Date: Mon, 27 Sep 2021 22:19:42 +0200 Subject: Explicitly specify autoclass members. --- docs/index.rst | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/docs/index.rst b/docs/index.rst index 1c4d05b..147a19f 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -67,7 +67,7 @@ computed when the item is inserted into the cache. suitable `lock` object. .. autoclass:: Cache(maxsize, getsizeof=None) - :members: + :members: currsize, getsizeof, maxsize This class discards arbitrary items using :meth:`popitem` to make space when necessary. Derived classes may override :meth:`popitem` @@ -77,31 +77,31 @@ computed when the item is inserted into the cache. :meth:`__setitem__` and :meth:`__delitem__`. .. autoclass:: FIFOCache(maxsize, getsizeof=None) - :members: + :members: popitem This class evicts items in the order they were added to make space when necessary. .. autoclass:: LFUCache(maxsize, getsizeof=None) - :members: + :members: popitem This class counts how often an item is retrieved, and discards the items used least often to make space when necessary. .. autoclass:: LRUCache(maxsize, getsizeof=None) - :members: + :members: popitem This class discards the least recently used items first to make space when necessary. .. autoclass:: MRUCache(maxsize, getsizeof=None) - :members: + :members: popitem This class discards the most recently used items first to make space when necessary. .. 
autoclass:: RRCache(maxsize, choice=random.choice, getsizeof=None) - :members: + :members: choice, popitem This class randomly selects candidate items and discards them to make space when necessary. -- cgit v1.2.3 From c0162c5163b498dc31995f88147b868872ae20d4 Mon Sep 17 00:00:00 2001 From: Thomas Kemmer Date: Wed, 29 Sep 2021 21:51:02 +0200 Subject: Fix #216: Add documentation and tests for using TTLCache with datetime. --- docs/index.rst | 14 ++++++++++++-- tests/test_ttl.py | 12 ++++++++++++ 2 files changed, 24 insertions(+), 2 deletions(-) diff --git a/docs/index.rst b/docs/index.rst index 147a19f..c9828b3 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -26,7 +26,7 @@ calls are provided, too. .. testsetup:: * import operator - from cachetools import cached, cachedmethod, LRUCache + from cachetools import cached, cachedmethod, LRUCache, TTLCache from unittest import mock urllib = mock.MagicMock() @@ -122,7 +122,17 @@ computed when the item is inserted into the cache. By default, the time-to-live is specified in seconds and :func:`time.monotonic` is used to retrieve the current time. A - custom `timer` function can be supplied if needed. + custom `timer` function can also be supplied: + + .. testcode:: + + from datetime import datetime, timedelta + + cache = TTLCache(maxsize=10, ttl=timedelta(hours=12), timer=datetime.now) + + The expression `timer() + ttl` at the time of insertion defines the + expiration time of a cache item, and must be comparable against + later results of `timer()`. .. 
method:: expire(self, time=None) diff --git a/tests/test_ttl.py b/tests/test_ttl.py index f677c9b..6e51a59 100644 --- a/tests/test_ttl.py +++ b/tests/test_ttl.py @@ -182,3 +182,15 @@ class TTLCacheTest(unittest.TestCase, CacheTestMixin): with self.assertRaises(KeyError): cache[(1, 2, 3)] self.assertNotIn((1, 2, 3), cache) + + def test_ttl_datetime(self): + from datetime import datetime, timedelta + + cache = TTLCache(maxsize=1, ttl=timedelta(days=1), timer=datetime.now) + + cache[1] = 1 + self.assertEqual(1, len(cache)) + cache.expire(datetime.now()) + self.assertEqual(1, len(cache)) + cache.expire(datetime.now() + timedelta(days=1)) + self.assertEqual(0, len(cache)) -- cgit v1.2.3 From 2a7afc2a33e257cc6df5db6387021bddda741264 Mon Sep 17 00:00:00 2001 From: Thomas Kemmer Date: Wed, 29 Sep 2021 22:10:14 +0200 Subject: Fix #210: Link to typeshed typing stubs. --- README.rst | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/README.rst b/README.rst index ed962f8..869d3cb 100644 --- a/README.rst +++ b/README.rst @@ -72,6 +72,11 @@ cachetools is available from PyPI_ and can be installed by running:: pip install cachetools +Typing stubs for this package are provided by typeshed_ and can be +installed by running:: + + pip install types-cachetools + Project Resources ------------------------------------------------------------------------ @@ -96,6 +101,7 @@ Licensed under the `MIT License`_. .. _cache algorithm: https://en.wikipedia.org/wiki/Cache_algorithms .. _PyPI: https://pypi.org/project/cachetools/ +.. _typeshed: https://github.com/python/typeshed/ .. _Documentation: https://cachetools.readthedocs.io/ .. _Issue tracker: https://github.com/tkem/cachetools/issues/ .. _Source code: https://github.com/tkem/cachetools/ -- cgit v1.2.3 From bc998ba4278e06f19ab0270c42b8ff7af6448cab Mon Sep 17 00:00:00 2001 From: Thomas Kemmer Date: Wed, 29 Sep 2021 22:21:52 +0200 Subject: Release v4.2.3. 
--- CHANGELOG.rst | 11 +++++++++++ src/cachetools/__init__.py | 2 +- 2 files changed, 12 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index cf4986e..d7e0f20 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -1,3 +1,14 @@ +v4.2.3 (2021-09-29) +=================== + +- Add documentation and tests for using ``TTLCache`` with + ``datetime``. + +- Link to typeshed typing stubs. + +- Flatten package file hierarchy. + + v4.2.2 (2021-04-27) =================== diff --git a/src/cachetools/__init__.py b/src/cachetools/__init__.py index 0e5546e..b2c0072 100644 --- a/src/cachetools/__init__.py +++ b/src/cachetools/__init__.py @@ -12,7 +12,7 @@ __all__ = ( "cachedmethod", ) -__version__ = "4.2.2" +__version__ = "4.2.3" import collections import collections.abc -- cgit v1.2.3 From c234536941a0e81cb28b12685ff8b76b4a05ed96 Mon Sep 17 00:00:00 2001 From: Thomas Kemmer Date: Thu, 30 Sep 2021 12:12:13 +0200 Subject: Fix #225: Add submodule shims for backward compatibility. --- setup.cfg | 4 ++- src/cachetools/cache.py | 7 +++++ src/cachetools/fifo.py | 7 +++++ src/cachetools/lfu.py | 7 +++++ src/cachetools/lru.py | 7 +++++ src/cachetools/mru.py | 7 +++++ src/cachetools/rr.py | 7 +++++ src/cachetools/ttl.py | 7 +++++ tests/test_deprecated.py | 67 ++++++++++++++++++++++++++++++++++++++++++++++++ 9 files changed, 119 insertions(+), 1 deletion(-) create mode 100644 src/cachetools/cache.py create mode 100644 src/cachetools/fifo.py create mode 100644 src/cachetools/lfu.py create mode 100644 src/cachetools/lru.py create mode 100644 src/cachetools/mru.py create mode 100644 src/cachetools/rr.py create mode 100644 src/cachetools/ttl.py create mode 100644 tests/test_deprecated.py diff --git a/setup.cfg b/setup.cfg index 9f3f926..143aba1 100644 --- a/setup.cfg +++ b/setup.cfg @@ -36,7 +36,9 @@ where = src max-line-length = 80 exclude = .git, .tox, build select = C, E, F, W, B, B950, I, N -ignore = E501, W503 +# F401: imported but unused (submodule shims) +# E501: line 
too long (black) +ignore = F401, E501 [build_sphinx] source-dir = docs/ diff --git a/src/cachetools/cache.py b/src/cachetools/cache.py new file mode 100644 index 0000000..8c9dfd7 --- /dev/null +++ b/src/cachetools/cache.py @@ -0,0 +1,7 @@ +import warnings + +from . import Cache + +warnings.warn( + "cachetools.cache is deprecated, please use cachetools.Cache", DeprecationWarning +) diff --git a/src/cachetools/fifo.py b/src/cachetools/fifo.py new file mode 100644 index 0000000..ec072cd --- /dev/null +++ b/src/cachetools/fifo.py @@ -0,0 +1,7 @@ +import warnings + +from . import FIFOCache + +warnings.warn( + "cachetools.fifo is deprecated, please use cachetools.FIFOCache", DeprecationWarning +) diff --git a/src/cachetools/lfu.py b/src/cachetools/lfu.py new file mode 100644 index 0000000..44514ac --- /dev/null +++ b/src/cachetools/lfu.py @@ -0,0 +1,7 @@ +import warnings + +from . import LFUCache + +warnings.warn( + "cachetools.lfu is deprecated, please use cachetools.LFUCache", DeprecationWarning +) diff --git a/src/cachetools/lru.py b/src/cachetools/lru.py new file mode 100644 index 0000000..5d557b0 --- /dev/null +++ b/src/cachetools/lru.py @@ -0,0 +1,7 @@ +import warnings + +from . import LRUCache + +warnings.warn( + "cachetools.lru is deprecated, please use cachetools.LRUCache", DeprecationWarning +) diff --git a/src/cachetools/mru.py b/src/cachetools/mru.py new file mode 100644 index 0000000..0714bdc --- /dev/null +++ b/src/cachetools/mru.py @@ -0,0 +1,7 @@ +import warnings + +from . import MRUCache + +warnings.warn( + "cachetools.mru is deprecated, please use cachetools.MRUCache", DeprecationWarning +) diff --git a/src/cachetools/rr.py b/src/cachetools/rr.py new file mode 100644 index 0000000..f49e185 --- /dev/null +++ b/src/cachetools/rr.py @@ -0,0 +1,7 @@ +import warnings + +from . 
import RRCache + +warnings.warn( + "cachetools.rr is deprecated, please use cachetools.RRCache", DeprecationWarning +) diff --git a/src/cachetools/ttl.py b/src/cachetools/ttl.py new file mode 100644 index 0000000..d96b677 --- /dev/null +++ b/src/cachetools/ttl.py @@ -0,0 +1,7 @@ +import warnings + +from . import TTLCache + +warnings.warn( + "cachetools.ttl is deprecated, please use cachetools.TTLCache", DeprecationWarning +) diff --git a/tests/test_deprecated.py b/tests/test_deprecated.py new file mode 100644 index 0000000..6510c82 --- /dev/null +++ b/tests/test_deprecated.py @@ -0,0 +1,67 @@ +import unittest +import warnings + + +class DeprecatedTest(unittest.TestCase): + def test_cache(self): + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter("always") + from cachetools.cache import Cache + + assert len(w) == 1 + assert issubclass(w[-1].category, DeprecationWarning) + assert "cachetools.cache" in str(w[-1].message) + + def test_fifo(self): + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter("always") + from cachetools.fifo import FIFOCache + + assert len(w) == 1 + assert issubclass(w[-1].category, DeprecationWarning) + assert "cachetools.fifo" in str(w[-1].message) + + def test_lfu(self): + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter("always") + from cachetools.lfu import LFUCache + + assert len(w) == 1 + assert issubclass(w[-1].category, DeprecationWarning) + assert "cachetools.lfu" in str(w[-1].message) + + def test_lru(self): + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter("always") + from cachetools.lru import LRUCache + + assert len(w) == 1 + assert issubclass(w[-1].category, DeprecationWarning) + assert "cachetools.lru" in str(w[-1].message) + + def test_mru(self): + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter("always") + from cachetools.mru import MRUCache + + assert len(w) == 1 + assert issubclass(w[-1].category, 
DeprecationWarning) + assert "cachetools.mru" in str(w[-1].message) + + def test_rr(self): + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter("always") + from cachetools.rr import RRCache + + assert len(w) == 1 + assert issubclass(w[-1].category, DeprecationWarning) + assert "cachetools.rr" in str(w[-1].message) + + def test_ttl(self): + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter("always") + from cachetools.ttl import TTLCache + + assert len(w) == 1 + assert issubclass(w[-1].category, DeprecationWarning) + assert "cachetools.ttl" in str(w[-1].message) -- cgit v1.2.3 From 8b9bbc4b60429bd73a06ff54a8eefd141215acc2 Mon Sep 17 00:00:00 2001 From: Thomas Kemmer Date: Thu, 30 Sep 2021 12:12:24 +0200 Subject: Release v4.2.4. --- CHANGELOG.rst | 6 ++++++ src/cachetools/__init__.py | 2 +- 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index d7e0f20..0c24a03 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -1,3 +1,9 @@ +v4.2.4 (2021-09-30) +=================== + +- Add submodule shims for backward compatibility. 
+ + v4.2.3 (2021-09-29) =================== diff --git a/src/cachetools/__init__.py b/src/cachetools/__init__.py index b2c0072..42822f0 100644 --- a/src/cachetools/__init__.py +++ b/src/cachetools/__init__.py @@ -12,7 +12,7 @@ __all__ = ( "cachedmethod", ) -__version__ = "4.2.3" +__version__ = "4.2.4" import collections import collections.abc -- cgit v1.2.3 From d2297fe1c606d512fc544cf53604ba8ee3c8324b Mon Sep 17 00:00:00 2001 From: wim glenn Date: Thu, 30 Sep 2021 10:14:34 -0500 Subject: use stacklevel=2 for deprecation warnings, so the source/line number of the user code is indicated --- src/cachetools/cache.py | 4 +++- src/cachetools/fifo.py | 4 +++- src/cachetools/lfu.py | 4 +++- src/cachetools/lru.py | 4 +++- src/cachetools/mru.py | 4 +++- src/cachetools/rr.py | 4 +++- src/cachetools/ttl.py | 4 +++- 7 files changed, 21 insertions(+), 7 deletions(-) diff --git a/src/cachetools/cache.py b/src/cachetools/cache.py index 8c9dfd7..ee5269d 100644 --- a/src/cachetools/cache.py +++ b/src/cachetools/cache.py @@ -3,5 +3,7 @@ import warnings from . import Cache warnings.warn( - "cachetools.cache is deprecated, please use cachetools.Cache", DeprecationWarning + "cachetools.cache is deprecated, please use cachetools.Cache", + DeprecationWarning, + stacklevel=2, ) diff --git a/src/cachetools/fifo.py b/src/cachetools/fifo.py index ec072cd..a29b789 100644 --- a/src/cachetools/fifo.py +++ b/src/cachetools/fifo.py @@ -3,5 +3,7 @@ import warnings from . import FIFOCache warnings.warn( - "cachetools.fifo is deprecated, please use cachetools.FIFOCache", DeprecationWarning + "cachetools.fifo is deprecated, please use cachetools.FIFOCache", + DeprecationWarning, + stacklevel=2, ) diff --git a/src/cachetools/lfu.py b/src/cachetools/lfu.py index 44514ac..5c9acef 100644 --- a/src/cachetools/lfu.py +++ b/src/cachetools/lfu.py @@ -3,5 +3,7 @@ import warnings from . 
import LFUCache warnings.warn( - "cachetools.lfu is deprecated, please use cachetools.LFUCache", DeprecationWarning + "cachetools.lfu is deprecated, please use cachetools.LFUCache", + DeprecationWarning, + stacklevel=2, ) diff --git a/src/cachetools/lru.py b/src/cachetools/lru.py index 5d557b0..48ddb36 100644 --- a/src/cachetools/lru.py +++ b/src/cachetools/lru.py @@ -3,5 +3,7 @@ import warnings from . import LRUCache warnings.warn( - "cachetools.lru is deprecated, please use cachetools.LRUCache", DeprecationWarning + "cachetools.lru is deprecated, please use cachetools.LRUCache", + DeprecationWarning, + stacklevel=2, ) diff --git a/src/cachetools/mru.py b/src/cachetools/mru.py index 0714bdc..a486dc4 100644 --- a/src/cachetools/mru.py +++ b/src/cachetools/mru.py @@ -3,5 +3,7 @@ import warnings from . import MRUCache warnings.warn( - "cachetools.mru is deprecated, please use cachetools.MRUCache", DeprecationWarning + "cachetools.mru is deprecated, please use cachetools.MRUCache", + DeprecationWarning, + stacklevel=2, ) diff --git a/src/cachetools/rr.py b/src/cachetools/rr.py index f49e185..81331bc 100644 --- a/src/cachetools/rr.py +++ b/src/cachetools/rr.py @@ -3,5 +3,7 @@ import warnings from . import RRCache warnings.warn( - "cachetools.rr is deprecated, please use cachetools.RRCache", DeprecationWarning + "cachetools.rr is deprecated, please use cachetools.RRCache", + DeprecationWarning, + stacklevel=2, ) diff --git a/src/cachetools/ttl.py b/src/cachetools/ttl.py index d96b677..ef343da 100644 --- a/src/cachetools/ttl.py +++ b/src/cachetools/ttl.py @@ -3,5 +3,7 @@ import warnings from . 
import TTLCache warnings.warn( - "cachetools.ttl is deprecated, please use cachetools.TTLCache", DeprecationWarning + "cachetools.ttl is deprecated, please use cachetools.TTLCache", + DeprecationWarning, + stacklevel=2, ) -- cgit v1.2.3 From 3807e93115f4cc269564166d39765e63a3770380 Mon Sep 17 00:00:00 2001 From: Hugo van Kemenade Date: Mon, 11 Oct 2021 19:45:39 +0300 Subject: Add support for Python 3.10 --- .github/workflows/ci.yml | 2 +- setup.cfg | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index b0a9160..1139f90 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -9,7 +9,7 @@ jobs: strategy: fail-fast: false matrix: - python: ["3.6", "3.7", "3.8", "3.9", "pypy-3.6", "pypy-3.7"] + python: ["3.6", "3.7", "3.8", "3.9", "3.10", "pypy-3.6", "pypy-3.7"] steps: - uses: actions/checkout@v2 - uses: actions/setup-python@v2 diff --git a/setup.cfg b/setup.cfg index 143aba1..74bcbc5 100644 --- a/setup.cfg +++ b/setup.cfg @@ -21,6 +21,7 @@ classifiers = Programming Language :: Python :: 3.7 Programming Language :: Python :: 3.8 Programming Language :: Python :: 3.9 + Programming Language :: Python :: 3.10 Topic :: Software Development :: Libraries :: Python Modules [options] -- cgit v1.2.3