-rw-r--r--  doc/en/reference.rst  |  14
-rw-r--r--  src/_pytest/_argcomplete.py  |  48
-rw-r--r--  src/_pytest/_code/code.py  |  83
-rw-r--r--  src/_pytest/_code/source.py  |  41
-rw-r--r--  src/_pytest/_io/saferepr.py  |  17
-rw-r--r--  src/_pytest/_io/terminalwriter.py  |  14
-rw-r--r--  src/_pytest/assertion/__init__.py  |  19
-rw-r--r--  src/_pytest/assertion/rewrite.py  |  36
-rw-r--r--  src/_pytest/assertion/truncate.py  |  14
-rw-r--r--  src/_pytest/assertion/util.py  |  16
-rwxr-xr-x  src/_pytest/cacheprovider.py  |  70
-rw-r--r--  src/_pytest/capture.py  |  99
-rw-r--r--  src/_pytest/compat.py  |  74
-rw-r--r--  src/_pytest/config/__init__.py  |  219
-rw-r--r--  src/_pytest/config/argparsing.py  |  88
-rw-r--r--  src/_pytest/config/exceptions.py  |  6
-rw-r--r--  src/_pytest/config/findpaths.py  |  20
-rw-r--r--  src/_pytest/debugging.py  |  18
-rw-r--r--  src/_pytest/deprecated.py  |  5
-rw-r--r--  src/_pytest/doctest.py  |  87
-rw-r--r--  src/_pytest/faulthandler.py  |  6
-rw-r--r--  src/_pytest/fixtures.py  |  334
-rw-r--r--  src/_pytest/freeze_support.py  |  15
-rw-r--r--  src/_pytest/helpconfig.py  |  9
-rw-r--r--  src/_pytest/hookspec.py  |  228
-rw-r--r--  src/_pytest/junitxml.py  |  70
-rw-r--r--  src/_pytest/logging.py  |  136
-rw-r--r--  src/_pytest/main.py  |  40
-rw-r--r--  src/_pytest/mark/__init__.py  |  16
-rw-r--r--  src/_pytest/mark/expression.py  |  10
-rw-r--r--  src/_pytest/mark/structures.py  |  31
-rw-r--r--  src/_pytest/monkeypatch.py  |  90
-rw-r--r--  src/_pytest/nodes.py  |  153
-rw-r--r--  src/_pytest/nose.py  |  12
-rw-r--r--  src/_pytest/outcomes.py  |  84
-rw-r--r--  src/_pytest/pastebin.py  |  21
-rw-r--r--  src/_pytest/pathlib.py  |  101
-rw-r--r--  src/_pytest/pytester.py  |  260
-rw-r--r--  src/_pytest/python.py  |  240
-rw-r--r--  src/_pytest/python_api.py  |  79
-rw-r--r--  src/_pytest/recwarn.py  |  2
-rw-r--r--  src/_pytest/reports.py  |  94
-rw-r--r--  src/_pytest/resultlog.py  |  6
-rw-r--r--  src/_pytest/runner.py  |  41
-rw-r--r--  src/_pytest/skipping.py  |  6
-rw-r--r--  src/_pytest/store.py  |  6
-rw-r--r--  src/_pytest/terminal.py  |  68
-rw-r--r--  src/_pytest/timing.py  |  3
-rw-r--r--  src/_pytest/tmpdir.py  |  66
-rw-r--r--  src/_pytest/unittest.py  |  42
-rw-r--r--  src/_pytest/warning_types.py  |  2
-rw-r--r--  src/_pytest/warnings.py  |  25
-rw-r--r--  src/pytest/__init__.py  |  4
-rw-r--r--  src/pytest/__main__.py  |  4
-rw-r--r--  testing/test_tmpdir.py  |  6
55 files changed, 1601 insertions, 1697 deletions
diff --git a/doc/en/reference.rst b/doc/en/reference.rst
index 775dd556a..f4a68f160 100644
--- a/doc/en/reference.rst
+++ b/doc/en/reference.rst
@@ -182,7 +182,7 @@ Mark a test function as using the given fixture names.
.. py:function:: pytest.mark.usefixtures(*names)
- :param args: the names of the fixture to use, as strings
+ :param args: The names of the fixture to use, as strings.
.. note::
@@ -209,8 +209,10 @@ Marks a test function as *expected to fail*.
Condition for marking the test function as xfail (``True/False`` or a
:ref:`condition string <string conditions>`). If a bool, you also have
to specify ``reason`` (see :ref:`condition string <string conditions>`).
- :keyword str reason: Reason why the test function is marked as xfail.
- :keyword Exception raises: Exception subclass expected to be raised by the test function; other exceptions will fail the test.
+ :keyword str reason:
+ Reason why the test function is marked as xfail.
+ :keyword Type[Exception] raises:
+ Exception subclass expected to be raised by the test function; other exceptions will fail the test.
:keyword bool run:
If the test function should actually be executed. If ``False``, the function will always xfail and will
not be executed (useful if a function is segfaulting).
@@ -224,7 +226,7 @@ Marks a test function as *expected to fail*.
a new release of a library fixes a known bug).
-custom marks
+Custom marks
~~~~~~~~~~~~
Marks are created dynamically using the factory object ``pytest.mark`` and applied as a decorator.
@@ -473,7 +475,7 @@ caplog
.. autofunction:: _pytest.logging.caplog()
:no-auto-options:
- This returns a :class:`_pytest.logging.LogCaptureFixture` instance.
+ Returns a :class:`_pytest.logging.LogCaptureFixture` instance.
.. autoclass:: _pytest.logging.LogCaptureFixture
:members:
@@ -491,7 +493,7 @@ monkeypatch
.. autofunction:: _pytest.monkeypatch.monkeypatch()
:no-auto-options:
- This returns a :class:`MonkeyPatch` instance.
+ Returns a :class:`MonkeyPatch` instance.
.. autoclass:: _pytest.monkeypatch.MonkeyPatch
:members:
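
For illustration, the xfail keywords documented in the hunks above can be combined like this; the test name, reason, and expected exception are hypothetical:

    import pytest

    @pytest.mark.xfail(
        reason="known parser bug",  # hypothetical reason string
        raises=ValueError,          # only ValueError counts as the expected failure
        run=True,                   # still execute the test body
        strict=False,               # an unexpected pass is reported as XPASS, not as a failure
    )
    def test_parse_unicode():
        raise ValueError("not implemented yet")
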
diff --git a/src/_pytest/_argcomplete.py b/src/_pytest/_argcomplete.py
index 7ca216ecf..3dbdf9318 100644
--- a/src/_pytest/_argcomplete.py
+++ b/src/_pytest/_argcomplete.py
@@ -1,7 +1,8 @@
-"""allow bash-completion for argparse with argcomplete if installed
-needs argcomplete>=0.5.6 for python 3.2/3.3 (older versions fail
+"""Allow bash-completion for argparse with argcomplete if installed.
+
+Needs argcomplete>=0.5.6 for python 3.2/3.3 (older versions fail
to find the magic string, so _ARGCOMPLETE env. var is never set, and
-this does not need special code.
+this does not need special code).
Function try_argcomplete(parser) should be called directly before
the call to ArgumentParser.parse_args().
@@ -10,8 +11,7 @@ The filescompleter is what you normally would use on the positional
arguments specification, in order to get "dirname/" after "dirn<TAB>"
instead of the default "dirname ":
- optparser.add_argument(Config._file_or_dir, nargs='*'
- ).completer=filescompleter
+ optparser.add_argument(Config._file_or_dir, nargs='*').completer=filescompleter
Other, application specific, completers should go in the file
doing the add_argument calls as they need to be specified as .completer
@@ -20,35 +20,43 @@ attribute points to will not be used).
SPEEDUP
=======
+
The generic argcomplete script for bash-completion
-(/etc/bash_completion.d/python-argcomplete.sh )
+(/etc/bash_completion.d/python-argcomplete.sh)
uses a python program to determine startup script generated by pip.
You can speed up completion somewhat by changing this script to include
# PYTHON_ARGCOMPLETE_OK
so that the python-argcomplete-check-easy-install-script does not
need to be called to find the entry point of the code and see if that is
-marked with PYTHON_ARGCOMPLETE_OK
+marked with PYTHON_ARGCOMPLETE_OK.
INSTALL/DEBUGGING
=================
+
To include this support in another application that has setup.py generated
scripts:
-- add the line:
+
+- Add the line:
# PYTHON_ARGCOMPLETE_OK
- near the top of the main python entry point
-- include in the file calling parse_args():
+ near the top of the main python entry point.
+
+- Include in the file calling parse_args():
from _argcomplete import try_argcomplete, filescompleter
- , call try_argcomplete just before parse_args(), and optionally add
- filescompleter to the positional arguments' add_argument()
+ Call try_argcomplete just before parse_args(), and optionally add
+ filescompleter to the positional arguments' add_argument().
+
If things do not work right away:
-- switch on argcomplete debugging with (also helpful when doing custom
+
+- Switch on argcomplete debugging with (also helpful when doing custom
completers):
export _ARC_DEBUG=1
-- run:
+
+- Run:
python-argcomplete-check-easy-install-script $(which appname)
echo $?
- will echo 0 if the magic line has been found, 1 if not
-- sometimes it helps to find early on errors using:
+ will echo 0 if the magic line has been found, 1 if not.
+
+- Sometimes it helps to find early on errors using:
_ARGCOMPLETE=1 _ARC_DEBUG=1 appname
which should throw a KeyError: 'COMPLINE' (which is properly set by the
global argcomplete script).
@@ -63,13 +71,13 @@ from typing import Optional
class FastFilesCompleter:
- "Fast file completer class"
+ """Fast file completer class."""
def __init__(self, directories: bool = True) -> None:
self.directories = directories
def __call__(self, prefix: str, **kwargs: Any) -> List[str]:
- """only called on non option completions"""
+ # Only called on non option completions.
if os.path.sep in prefix[1:]:
prefix_dir = len(os.path.dirname(prefix) + os.path.sep)
else:
@@ -77,7 +85,7 @@ class FastFilesCompleter:
completion = []
globbed = []
if "*" not in prefix and "?" not in prefix:
- # we are on unix, otherwise no bash
+ # We are on unix, otherwise no bash.
if not prefix or prefix[-1] == os.path.sep:
globbed.extend(glob(prefix + ".*"))
prefix += "*"
@@ -85,7 +93,7 @@ class FastFilesCompleter:
for x in sorted(globbed):
if os.path.isdir(x):
x += "/"
- # append stripping the prefix (like bash, not like compgen)
+ # Append stripping the prefix (like bash, not like compgen).
completion.append(x[prefix_dir:])
return completion
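
A minimal sketch of the wiring this module docstring describes, assuming argcomplete is installed; "appname" and the positional argument name are hypothetical:

    # PYTHON_ARGCOMPLETE_OK
    import argparse

    from _pytest._argcomplete import filescompleter, try_argcomplete

    parser = argparse.ArgumentParser(prog="appname")
    # add_argument() returns the Action object; argcomplete reads its .completer
    # attribute. Note that filescompleter is None when argcomplete is missing.
    parser.add_argument("file_or_dir", nargs="*").completer = filescompleter
    try_argcomplete(parser)  # must be called just before parse_args()
    args = parser.parse_args()
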
diff --git a/src/_pytest/_code/code.py b/src/_pytest/_code/code.py
index 219ebb68f..e0aadd724 100644
--- a/src/_pytest/_code/code.py
+++ b/src/_pytest/_code/code.py
@@ -71,9 +71,8 @@ class Code:
@property
def path(self) -> Union[py.path.local, str]:
- """Return a path object pointing to source code (or a str in case
- of OSError / non-existing file).
- """
+ """Return a path object pointing to source code, or an ``str`` in
+ case of ``OSError`` / non-existing file."""
if not self.raw.co_filename:
return ""
try:
@@ -420,15 +419,16 @@ class ExceptionInfo(Generic[_E]):
exc_info: Tuple["Type[_E]", "_E", TracebackType],
exprinfo: Optional[str] = None,
) -> "ExceptionInfo[_E]":
- """Returns an ExceptionInfo for an existing exc_info tuple.
+ """Return an ExceptionInfo for an existing exc_info tuple.
.. warning::
Experimental API
- :param exprinfo: a text string helping to determine if we should
- strip ``AssertionError`` from the output, defaults
- to the exception message/``__str__()``
+ :param exprinfo:
+ A text string helping to determine if we should strip
+ ``AssertionError`` from the output. Defaults to the exception
+ message/``__str__()``.
"""
_striptext = ""
if exprinfo is None and isinstance(exc_info[1], AssertionError):
@@ -444,15 +444,16 @@ class ExceptionInfo(Generic[_E]):
def from_current(
cls, exprinfo: Optional[str] = None
) -> "ExceptionInfo[BaseException]":
- """Returns an ExceptionInfo matching the current traceback.
+ """Return an ExceptionInfo matching the current traceback.
.. warning::
Experimental API
- :param exprinfo: a text string helping to determine if we should
- strip ``AssertionError`` from the output, defaults
- to the exception message/``__str__()``
+ :param exprinfo:
+ A text string helping to determine if we should strip
+ ``AssertionError`` from the output. Defaults to the exception
+ message/``__str__()``.
"""
tup = sys.exc_info()
assert tup[0] is not None, "no current exception"
@@ -467,7 +468,7 @@ class ExceptionInfo(Generic[_E]):
return cls(None)
def fill_unfilled(self, exc_info: Tuple["Type[_E]", _E, TracebackType]) -> None:
- """fill an unfilled ExceptionInfo created with for_later()"""
+ """Fill an unfilled ExceptionInfo created with ``for_later()``."""
assert self._excinfo is None, "ExceptionInfo was already filled"
self._excinfo = exc_info
@@ -568,7 +569,8 @@ class ExceptionInfo(Generic[_E]):
Show locals per traceback entry.
Ignored if ``style=="native"``.
- :param str style: long|short|no|native|value traceback style
+ :param str style:
+ long|short|no|native|value traceback style.
:param bool abspath:
If paths should be changed to absolute or left unchanged.
@@ -583,7 +585,8 @@ class ExceptionInfo(Generic[_E]):
:param bool truncate_locals:
With ``showlocals==True``, make sure locals can be safely represented as strings.
- :param bool chain: if chained exceptions in Python 3 should be shown.
+ :param bool chain:
+ If chained exceptions in Python 3 should be shown.
.. versionchanged:: 3.9
@@ -643,7 +646,7 @@ class FormattedExcinfo:
astcache = attr.ib(default=attr.Factory(dict), init=False, repr=False)
def _getindent(self, source: "Source") -> int:
- # figure out indent for given source
+ # Figure out indent for the given source.
try:
s = str(source.getstatement(len(source) - 1))
except KeyboardInterrupt:
@@ -704,7 +707,7 @@ class FormattedExcinfo:
) -> List[str]:
lines = []
indentstr = " " * indent
- # get the real exception information out
+ # Get the real exception information out.
exlines = excinfo.exconly(tryshort=True).split("\n")
failindent = self.fail_marker + indentstr[1:]
for line in exlines:
@@ -730,8 +733,7 @@ class FormattedExcinfo:
str_repr = saferepr(value)
else:
str_repr = safeformat(value)
- # if len(str_repr) < 70 or not isinstance(value,
- # (list, tuple, dict)):
+ # if len(str_repr) < 70 or not isinstance(value, (list, tuple, dict)):
lines.append("{:<10} = {}".format(name, str_repr))
# else:
# self._line("%-10s =\\" % (name,))
@@ -809,16 +811,17 @@ class FormattedExcinfo:
def _truncate_recursive_traceback(
self, traceback: Traceback
) -> Tuple[Traceback, Optional[str]]:
- """
- Truncate the given recursive traceback trying to find the starting point
- of the recursion.
+ """Truncate the given recursive traceback trying to find the starting
+ point of the recursion.
- The detection is done by going through each traceback entry and finding the
- point in which the locals of the frame are equal to the locals of a previous frame (see ``recursionindex()``.
+ The detection is done by going through each traceback entry and
+ finding the point in which the locals of the frame are equal to the
+ locals of a previous frame (see ``recursionindex()``).
- Handle the situation where the recursion process might raise an exception (for example
- comparing numpy arrays using equality raises a TypeError), in which case we do our best to
- warn the user of the error and show a limited traceback.
+ Handle the situation where the recursion process might raise an
+ exception (for example comparing numpy arrays using equality raises a
+ TypeError), in which case we do our best to warn the user of the
+ error and show a limited traceback.
"""
try:
recursionindex = traceback.recursionindex()
@@ -863,8 +866,8 @@ class FormattedExcinfo:
excinfo_._getreprcrash() if self.style != "value" else None
) # type: Optional[ReprFileLocation]
else:
- # fallback to native repr if the exception doesn't have a traceback:
- # ExceptionInfo objects require a full traceback to work
+ # Fallback to native repr if the exception doesn't have a traceback:
+ # ExceptionInfo objects require a full traceback to work.
reprtraceback = ReprTracebackNative(
traceback.format_exception(type(e), e, None)
)
@@ -915,7 +918,7 @@ class TerminalRepr:
# This class is abstract -- only subclasses are instantiated.
@attr.s(**{ATTRS_EQ_FIELD: False}) # type: ignore
class ExceptionRepr(TerminalRepr):
- # Provided by in subclasses.
+ # Provided by subclasses.
reprcrash = None # type: Optional[ReprFileLocation]
reprtraceback = None # type: ReprTraceback
@@ -942,7 +945,7 @@ class ExceptionChainRepr(ExceptionRepr):
def __attrs_post_init__(self) -> None:
super().__attrs_post_init__()
# reprcrash and reprtraceback of the outermost (the newest) exception
- # in the chain
+ # in the chain.
self.reprtraceback = self.chain[-1][0]
self.reprcrash = self.chain[-1][1]
@@ -974,7 +977,7 @@ class ReprTraceback(TerminalRepr):
entrysep = "_ "
def toterminal(self, tw: TerminalWriter) -> None:
- # the entries might have different styles
+ # The entries might have different styles.
for i, entry in enumerate(self.reprentries):
if entry.style == "long":
tw.line("")
@@ -1017,7 +1020,7 @@ class ReprEntry(TerminalRepr):
style = attr.ib(type="_TracebackStyle")
def _write_entry_lines(self, tw: TerminalWriter) -> None:
- """Writes the source code portions of a list of traceback entries with syntax highlighting.
+ """Write the source code portions of a list of traceback entries with syntax highlighting.
Usually entries are lines like these:
@@ -1099,8 +1102,8 @@ class ReprFileLocation(TerminalRepr):
message = attr.ib(type=str)
def toterminal(self, tw: TerminalWriter) -> None:
- # filename and lineno output for each entry,
- # using an output format that most editors understand
+ # Filename and lineno output for each entry, using an output format
+ # that most editors understand.
msg = self.message
i = msg.find("\n")
if i != -1:
@@ -1175,10 +1178,10 @@ def getfslineno(obj: object) -> Tuple[Union[str, py.path.local], int]:
return code.path, code.firstlineno
-# relative paths that we use to filter traceback entries from appearing to the user;
-# see filter_traceback
+# Relative paths that we use to filter traceback entries from appearing to the user;
+# see filter_traceback.
# note: if we need to add more paths than what we have now we should probably use a list
-# for better maintenance
+# for better maintenance.
_PLUGGY_DIR = py.path.local(pluggy.__file__.rstrip("oc"))
# pluggy is either a package or a single module depending on the version
@@ -1197,14 +1200,14 @@ def filter_traceback(entry: TracebackEntry) -> bool:
* internal traceback from pytest or its internal libraries, py and pluggy.
"""
# entry.path might sometimes return a str object when the entry
- # points to dynamically generated code
- # see https://bitbucket.org/pytest-dev/py/issues/71
+ # points to dynamically generated code.
+ # See https://bitbucket.org/pytest-dev/py/issues/71.
raw_filename = entry.frame.code.raw.co_filename
is_generated = "<" in raw_filename and ">" in raw_filename
if is_generated:
return False
# entry.path might point to a non-existing file, in which case it will
- # also return a str object. see #1133
+ # also return a str object. See #1133.
p = py.path.local(entry.path)
return (
not p.relto(_PLUGGY_DIR) and not p.relto(_PYTEST_DIR) and not p.relto(_PY_DIR)
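
A short illustration of the ``from_current()`` and ``exconly()`` behaviour documented above (internal pytest API, shown only to make the docstrings concrete):

    from _pytest._code import ExceptionInfo

    try:
        1 / 0
    except ZeroDivisionError:
        excinfo = ExceptionInfo.from_current()
        assert excinfo.type is ZeroDivisionError
        # exconly() returns the "ExceptionType: message" line.
        assert excinfo.exconly() == "ZeroDivisionError: division by zero"
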
diff --git a/src/_pytest/_code/source.py b/src/_pytest/_code/source.py
index 65560be2a..8338014ae 100644
--- a/src/_pytest/_code/source.py
+++ b/src/_pytest/_code/source.py
@@ -67,9 +67,7 @@ class Source:
return len(self.lines)
def strip(self) -> "Source":
- """ return new source object with trailing
- and leading blank lines removed.
- """
+ """Return new Source object with trailing and leading blank lines removed."""
start, end = 0, len(self)
while start < end and not self.lines[start].strip():
start += 1
@@ -80,31 +78,28 @@ class Source:
return source
def indent(self, indent: str = " " * 4) -> "Source":
- """ return a copy of the source object with
- all lines indented by the given indent-string.
- """
+ """Return a copy of the source object with all lines indented by the
+ given indent-string."""
newsource = Source()
newsource.lines = [(indent + line) for line in self.lines]
return newsource
def getstatement(self, lineno: int) -> "Source":
- """ return Source statement which contains the
- given linenumber (counted from 0).
- """
+ """Return Source statement which contains the given linenumber
+ (counted from 0)."""
start, end = self.getstatementrange(lineno)
return self[start:end]
def getstatementrange(self, lineno: int) -> Tuple[int, int]:
- """ return (start, end) tuple which spans the minimal
- statement region which containing the given lineno.
- """
+ """Return (start, end) tuple which spans the minimal statement region
+ containing the given lineno."""
if not (0 <= lineno < len(self)):
raise IndexError("lineno out of range")
ast, start, end = getstatementrange_ast(lineno, self)
return start, end
def deindent(self) -> "Source":
- """return a new source object deindented."""
+ """Return a new Source object deindented."""
newsource = Source()
newsource.lines[:] = deindent(self.lines)
return newsource
@@ -129,7 +124,7 @@ def findsource(obj) -> Tuple[Optional[Source], int]:
def getrawcode(obj, trycall: bool = True):
- """ return code object for given function. """
+ """Return code object for given function."""
try:
return obj.__code__
except AttributeError:
@@ -148,8 +143,8 @@ def deindent(lines: Iterable[str]) -> List[str]:
def get_statement_startend2(lineno: int, node: ast.AST) -> Tuple[int, Optional[int]]:
- # flatten all statements and except handlers into one lineno-list
- # AST's line numbers start indexing at 1
+ # Flatten all statements and except handlers into one lineno-list.
+ # AST's line numbers start indexing at 1.
values = [] # type: List[int]
for x in ast.walk(node):
if isinstance(x, (ast.stmt, ast.ExceptHandler)):
@@ -157,7 +152,7 @@ def get_statement_startend2(lineno: int, node: ast.AST) -> Tuple[int, Optional[i
for name in ("finalbody", "orelse"):
val = getattr(x, name, None) # type: Optional[List[ast.stmt]]
if val:
- # treat the finally/orelse part as its own statement
+ # Treat the finally/orelse part as its own statement.
values.append(val[0].lineno - 1 - 1)
values.sort()
insert_index = bisect_right(values, lineno)
@@ -178,13 +173,13 @@ def getstatementrange_ast(
if astnode is None:
content = str(source)
# See #4260:
- # don't produce duplicate warnings when compiling source to find ast
+ # Don't produce duplicate warnings when compiling source to find AST.
with warnings.catch_warnings():
warnings.simplefilter("ignore")
astnode = ast.parse(content, "source", "exec")
start, end = get_statement_startend2(lineno, astnode)
- # we need to correct the end:
+ # We need to correct the end:
# - ast-parsing strips comments
# - there might be empty lines
# - we might have lesser indented code blocks at the end
@@ -192,10 +187,10 @@ def getstatementrange_ast(
end = len(source.lines)
if end > start + 1:
- # make sure we don't span differently indented code blocks
- # by using the BlockFinder helper used which inspect.getsource() uses itself
+ # Make sure we don't span differently indented code blocks
+ # by using the BlockFinder helper which inspect.getsource() itself uses.
block_finder = inspect.BlockFinder()
- # if we start with an indented line, put blockfinder to "started" mode
+ # If we start with an indented line, put blockfinder to "started" mode.
block_finder.started = source.lines[start][0].isspace()
it = ((x + "\n") for x in source.lines[start:end])
try:
@@ -206,7 +201,7 @@ def getstatementrange_ast(
except Exception:
pass
- # the end might still point to a comment or empty line, correct it
+ # The end might still point to a comment or empty line, correct it.
while end:
line = source.lines[end - 1].lstrip()
if line.startswith("#") or not line:
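
A small sketch of the Source helpers whose docstrings change above (internal API; behaviour as described in those docstrings):

    from _pytest._code import Source

    src = Source("def f():\n    x = 1\n    return x")
    print(len(src))                  # number of lines: 3
    print(str(src.getstatement(1)))  # statement containing line 1 (0-based): "    x = 1"
    print(str(src.indent("  ")))     # every line indented by two extra spaces
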
diff --git a/src/_pytest/_io/saferepr.py b/src/_pytest/_io/saferepr.py
index 823b8d719..9a4975f61 100644
--- a/src/_pytest/_io/saferepr.py
+++ b/src/_pytest/_io/saferepr.py
@@ -36,9 +36,8 @@ def _ellipsize(s: str, maxsize: int) -> str:
class SafeRepr(reprlib.Repr):
- """subclass of repr.Repr that limits the resulting size of repr()
- and includes information on exceptions raised during the call.
- """
+ """repr.Repr that limits the resulting size of repr() and includes
+ information on exceptions raised during the call."""
def __init__(self, maxsize: int) -> None:
super().__init__()
@@ -65,7 +64,8 @@ class SafeRepr(reprlib.Repr):
def safeformat(obj: object) -> str:
- """return a pretty printed string for the given object.
+ """Return a pretty printed string for the given object.
+
Failing __repr__ functions of user instances will be represented
with a short exception info.
"""
@@ -76,11 +76,14 @@ def safeformat(obj: object) -> str:
def saferepr(obj: object, maxsize: int = 240) -> str:
- """return a size-limited safe repr-string for the given object.
+ """Return a size-limited safe repr-string for the given object.
+
Failing __repr__ functions of user instances will be represented
with a short exception info and 'saferepr' generally takes
- care to never raise exceptions itself. This function is a wrapper
- around the Repr/reprlib functionality of the standard 2.6 lib.
+ care to never raise exceptions itself.
+
+ This function is a wrapper around the Repr/reprlib functionality of the
+ standard 2.6 lib.
"""
return SafeRepr(maxsize).repr(obj)
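
For example, the contract described above means a crashing ``__repr__`` is reported rather than propagated (a sketch with a contrived class):

    from _pytest._io.saferepr import saferepr

    class Broken:
        def __repr__(self):
            raise RuntimeError("boom")

    s = saferepr(Broken())
    # No exception escapes; the result mentions the error raised inside __repr__.
    assert "RuntimeError" in s and "boom" in s
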
diff --git a/src/_pytest/_io/terminalwriter.py b/src/_pytest/_io/terminalwriter.py
index 5ffc550db..0afe4a0ed 100644
--- a/src/_pytest/_io/terminalwriter.py
+++ b/src/_pytest/_io/terminalwriter.py
@@ -111,13 +111,13 @@ class TerminalWriter:
) -> None:
if fullwidth is None:
fullwidth = self.fullwidth
- # the goal is to have the line be as long as possible
- # under the condition that len(line) <= fullwidth
+ # The goal is to have the line be as long as possible
+ # under the condition that len(line) <= fullwidth.
if sys.platform == "win32":
- # if we print in the last column on windows we are on a
+ # If we print in the last column on windows we are on a
# new line but there is no way to verify/neutralize this
- # (we may not know the exact line width)
- # so let's be defensive to avoid empty lines in the output
+ # (we may not know the exact line width).
+ # So let's be defensive to avoid empty lines in the output.
fullwidth -= 1
if title is not None:
# we want 2 + 2*len(fill) + len(title) <= fullwidth
@@ -131,9 +131,9 @@ class TerminalWriter:
# we want len(sepchar)*N <= fullwidth
# i.e. N <= fullwidth // len(sepchar)
line = sepchar * (fullwidth // len(sepchar))
- # in some situations there is room for an extra sepchar at the right,
+ # In some situations there is room for an extra sepchar at the right,
# in particular if we consider that with a sepchar like "_ " the
- # trailing space is not important at the end of the line
+ # trailing space is not important at the end of the line.
if len(line) + len(sepchar.rstrip()) <= fullwidth:
line += sepchar.rstrip()
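
The width arithmetic spelled out in these comments can be checked in a few lines (the numbers are hypothetical and this is not pytest API):

    fullwidth = 80
    sepchar, title = "_ ", "FAILURES"

    # 2 + 2*len(sepchar)*N + len(title) <= fullwidth  =>  largest valid N:
    n = (fullwidth - len(title) - 2) // (2 * len(sepchar))
    fill = sepchar * n
    line = "{} {} {}".format(fill, title, fill)
    assert len(line) <= fullwidth
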
diff --git a/src/_pytest/assertion/__init__.py b/src/_pytest/assertion/__init__.py
index 64d2267e7..bf9dadf4b 100644
--- a/src/_pytest/assertion/__init__.py
+++ b/src/_pytest/assertion/__init__.py
@@ -1,6 +1,4 @@
-"""
-support for presenting detailed information in failing assertions.
-"""
+"""Support for presenting detailed information in failing assertions."""
import sys
from typing import Any
from typing import Generator
@@ -55,7 +53,7 @@ def register_assert_rewrite(*names: str) -> None:
actually imported, usually in your __init__.py if you are a plugin
using a package.
- :raise TypeError: if the given module names are not strings.
+ :raises TypeError: If the given module names are not strings.
"""
for name in names:
if not isinstance(name, str):
@@ -105,9 +103,9 @@ def install_importhook(config: Config) -> rewrite.AssertionRewritingHook:
def pytest_collection(session: "Session") -> None:
- # this hook is only called when test modules are collected
+ # This hook is only called when test modules are collected
# so for example not in the master process of pytest-xdist
- # (which does not collect test modules)
+ # (which does not collect test modules).
assertstate = session.config._store.get(assertstate_key, None)
if assertstate:
if assertstate.hook is not None:
@@ -116,18 +114,17 @@ def pytest_collection(session: "Session") -> None:
@hookimpl(tryfirst=True, hookwrapper=True)
def pytest_runtest_protocol(item: Item) -> Generator[None, None, None]:
- """Setup the pytest_assertrepr_compare and pytest_assertion_pass hooks
+ """Setup the pytest_assertrepr_compare and pytest_assertion_pass hooks.
- The rewrite module will use util._reprcompare if
- it exists to use custom reporting via the
- pytest_assertrepr_compare hook. This sets up this custom
+ The rewrite module will use util._reprcompare if it exists to use custom
+ reporting via the pytest_assertrepr_compare hook. This sets up this custom
comparison for the test.
"""
ihook = item.ihook
def callbinrepr(op, left: object, right: object) -> Optional[str]:
- """Call the pytest_assertrepr_compare hook and prepare the result
+ """Call the pytest_assertrepr_compare hook and prepare the result.
This uses the first result from the hook and then ensures the
following:
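
A sketch of the ``register_assert_rewrite()`` usage described above, as it might appear in a plugin package's ``__init__.py`` ("myplugin.helpers" is a hypothetical module name):

    import pytest

    # Must run before the helper module is imported anywhere, otherwise its
    # asserts are not rewritten; a non-string name raises TypeError.
    pytest.register_assert_rewrite("myplugin.helpers")
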
diff --git a/src/_pytest/assertion/rewrite.py b/src/_pytest/assertion/rewrite.py
index e77b1b0b8..730d5382a 100644
--- a/src/_pytest/assertion/rewrite.py
+++ b/src/_pytest/assertion/rewrite.py
@@ -1,4 +1,4 @@
-"""Rewrite assertion AST to produce nice error messages"""
+"""Rewrite assertion AST to produce nice error messages."""
import ast
import errno
import functools
@@ -170,7 +170,7 @@ class AssertionRewritingHook(importlib.abc.MetaPathFinder, importlib.abc.Loader)
exec(co, module.__dict__)
def _early_rewrite_bailout(self, name: str, state: "AssertionState") -> bool:
- """This is a fast way to get out of rewriting modules.
+ """A fast way to get out of rewriting modules.
Profiling has shown that the call to PathFinder.find_spec (inside of
the find_spec from this class) is a major slowdown, so, this method
@@ -350,7 +350,7 @@ else:
def _rewrite_test(fn: Path, config: Config) -> Tuple[os.stat_result, types.CodeType]:
- """read and rewrite *fn* and return the code object."""
+ """Read and rewrite *fn* and return the code object."""
fn_ = fspath(fn)
stat = os.stat(fn_)
with open(fn_, "rb") as f:
@@ -411,7 +411,7 @@ def rewrite_asserts(
def _saferepr(obj: object) -> str:
- """Get a safe repr of an object for assertion error messages.
+ r"""Get a safe repr of an object for assertion error messages.
The assertion formatting (util.format_explanation()) requires
newlines to be escaped since they are a special character for it.
@@ -419,18 +419,16 @@ def _saferepr(obj: object) -> str:
custom repr it is possible to contain one of the special escape
sequences, especially '\n{' and '\n}' are likely to be present in
JSON reprs.
-
"""
return saferepr(obj).replace("\n", "\\n")
def _format_assertmsg(obj: object) -> str:
- """Format the custom assertion message given.
+ r"""Format the custom assertion message given.
For strings this simply replaces newlines with '\n~' so that
util.format_explanation() will preserve them instead of escaping
newlines. For other objects saferepr() is used first.
-
"""
# reprlib appears to have a bug which means that if a string
# contains a newline it gets escaped, however if an object has a
@@ -491,8 +489,8 @@ def _call_assertion_pass(lineno: int, orig: str, expl: str) -> None:
def _check_if_assertion_pass_impl() -> bool:
- """Checks if any plugins implement the pytest_assertion_pass hook
- in order not to generate explanation unecessarily (might be expensive)"""
+ """Check if any plugins implement the pytest_assertion_pass hook
+ in order not to generate an explanation unnecessarily (might be expensive)."""
return True if util._assertion_pass else False
@@ -541,7 +539,7 @@ def set_location(node, lineno, col_offset):
def _get_assertion_exprs(src: bytes) -> Dict[int, str]:
- """Returns a mapping from {lineno: "assertion test expression"}"""
+ """Return a mapping from {lineno: "assertion test expression"}."""
ret = {} # type: Dict[int, str]
depth = 0
@@ -645,7 +643,6 @@ class AssertionRewriter(ast.NodeVisitor):
This state is reset on every new assert statement visited and used
by the other visitors.
-
"""
def __init__(
@@ -770,7 +767,6 @@ class AssertionRewriter(ast.NodeVisitor):
current formatting context, e.g. ``%(py0)s``. The placeholder
and expr are placed in the current format context so that it
can be used on the next call to .pop_format_context().
-
"""
specifier = "py" + str(next(self.variable_counter))
self.explanation_specifiers[specifier] = expr
@@ -785,7 +781,6 @@ class AssertionRewriter(ast.NodeVisitor):
.explanation_param(). Finally .pop_format_context() is used
to format a string of %-formatted values as added by
.explanation_param().
-
"""
self.explanation_specifiers = {} # type: Dict[str, ast.expr]
self.stack.append(self.explanation_specifiers)
@@ -797,7 +792,6 @@ class AssertionRewriter(ast.NodeVisitor):
the %-placeholders created by .explanation_param(). This will
add the required code to format said string to .expl_stmts and
return the ast.Name instance of the formatted string.
-
"""
current = self.stack.pop()
if self.stack:
@@ -824,7 +818,6 @@ class AssertionRewriter(ast.NodeVisitor):
intermediate values and replace it with an if statement which
raises an assertion error with a detailed explanation in case
the expression is false.
-
"""
if isinstance(assert_.test, ast.Tuple) and len(assert_.test.elts) >= 1:
from _pytest.warning_types import PytestAssertRewriteWarning
@@ -994,9 +987,6 @@ class AssertionRewriter(ast.NodeVisitor):
return res, explanation
def visit_Call(self, call: ast.Call) -> Tuple[ast.Name, str]:
- """
- visit `ast.Call` nodes
- """
new_func, func_expl = self.visit(call.func)
arg_expls = []
new_args = []
@@ -1021,7 +1011,7 @@ class AssertionRewriter(ast.NodeVisitor):
return res, outer_expl
def visit_Starred(self, starred: ast.Starred) -> Tuple[ast.Starred, str]:
- # From Python 3.5, a Starred node can appear in a function call
+ # From Python 3.5, a Starred node can appear in a function call.
res, expl = self.visit(starred.value)
new_starred = ast.Starred(res, starred.ctx)
return new_starred, "*" + expl
@@ -1076,8 +1066,10 @@ class AssertionRewriter(ast.NodeVisitor):
def try_makedirs(cache_dir: Path) -> bool:
- """Attempts to create the given directory and sub-directories exist, returns True if
- successful or it already exists"""
+ """Attempt to create the given directory and sub-directories exist.
+
+ Returns True if successful or if it already exists.
+ """
try:
os.makedirs(fspath(cache_dir), exist_ok=True)
except (FileNotFoundError, NotADirectoryError, FileExistsError):
@@ -1096,7 +1088,7 @@ def try_makedirs(cache_dir: Path) -> bool:
def get_cache_dir(file_path: Path) -> Path:
- """Returns the cache directory to write .pyc files for the given .py file path"""
+ """Return the cache directory to write .pyc files for the given .py file path."""
if sys.version_info >= (3, 8) and sys.pycache_prefix:
# given:
# prefix = '/tmp/pycs'
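
To make the newline-escaping contract of ``_saferepr()`` above concrete (internal helper; the class is contrived for illustration):

    from _pytest.assertion.rewrite import _saferepr

    class Multi:
        def __repr__(self):
            return "first line\nsecond line"

    # Real newlines in the repr are escaped so that util.format_explanation()
    # can keep "\n" as its own control character.
    assert _saferepr(Multi()) == "first line\\nsecond line"
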
diff --git a/src/_pytest/assertion/truncate.py b/src/_pytest/assertion/truncate.py
index fb2bf9c8e..c572cc744 100644
--- a/src/_pytest/assertion/truncate.py
+++ b/src/_pytest/assertion/truncate.py
@@ -1,5 +1,4 @@
-"""
-Utilities for truncating assertion output.
+"""Utilities for truncating assertion output.
Current default behaviour is to truncate assertion explanations at
~8 terminal lines, unless running in "-vv" mode or running on CI.
@@ -19,18 +18,14 @@ USAGE_MSG = "use '-vv' to show"
def truncate_if_required(
explanation: List[str], item: Item, max_length: Optional[int] = None
) -> List[str]:
- """
- Truncate this assertion explanation if the given test item is eligible.
- """
+ """Truncate this assertion explanation if the given test item is eligible."""
if _should_truncate_item(item):
return _truncate_explanation(explanation)
return explanation
def _should_truncate_item(item: Item) -> bool:
- """
- Whether or not this test item is eligible for truncation.
- """
+ """Whether or not this test item is eligible for truncation."""
verbose = item.config.option.verbose
return verbose < 2 and not _running_on_ci()
@@ -46,8 +41,7 @@ def _truncate_explanation(
max_lines: Optional[int] = None,
max_chars: Optional[int] = None,
) -> List[str]:
- """
- Truncate given list of strings that makes up the assertion explanation.
+ """Truncate given list of strings that makes up the assertion explanation.
Truncates to either 8 lines, or 640 characters - whichever the input reaches
first. The remaining lines will be replaced by a usage message.
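
A quick sketch of what the truncation described above does to a long explanation, calling the private helper directly with its documented defaults (~8 lines / 640 chars):

    from _pytest.assertion.truncate import _truncate_explanation

    explanation = ["assert left == right"] + ["  diff line %d" % i for i in range(20)]
    truncated = _truncate_explanation(explanation, max_lines=8, max_chars=640)
    assert len(truncated) < len(explanation)
    assert "use '-vv' to show" in truncated[-1]
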
diff --git a/src/_pytest/assertion/util.py b/src/_pytest/assertion/util.py
index 554aec77f..e80e476c8 100644
--- a/src/_pytest/assertion/util.py
+++ b/src/_pytest/assertion/util.py
@@ -1,4 +1,4 @@
-"""Utilities for assertion debugging"""
+"""Utilities for assertion debugging."""
import collections.abc
import pprint
from typing import AbstractSet
@@ -30,7 +30,7 @@ _assertion_pass = None # type: Optional[Callable[[int, str, str], None]]
def format_explanation(explanation: str) -> str:
- """This formats an explanation
+ r"""Format an explanation.
Normally all embedded newlines are escaped, however there are
three exceptions: \n{, \n} and \n~. The first two are intended
@@ -45,7 +45,7 @@ def format_explanation(explanation: str) -> str:
def _split_explanation(explanation: str) -> List[str]:
- """Return a list of individual lines in the explanation
+ r"""Return a list of individual lines in the explanation.
This will return a list of lines split on '\n{', '\n}' and '\n~'.
Any other newlines will be escaped and appear in the line as the
@@ -62,11 +62,11 @@ def _split_explanation(explanation: str) -> List[str]:
def _format_lines(lines: Sequence[str]) -> List[str]:
- """Format the individual lines
+ """Format the individual lines.
- This will replace the '{', '}' and '~' characters of our mini
- formatting language with the proper 'where ...', 'and ...' and ' +
- ...' text, taking care of indentation along the way.
+ This will replace the '{', '}' and '~' characters of our mini formatting
+ language with the proper 'where ...', 'and ...' and ' + ...' text, taking
+ care of indentation along the way.
Return a list of formatted lines.
"""
@@ -129,7 +129,7 @@ def isiterable(obj: Any) -> bool:
def assertrepr_compare(config, op: str, left: Any, right: Any) -> Optional[List[str]]:
- """Return specialised explanations for some operators/operands"""
+ """Return specialised explanations for some operators/operands."""
verbose = config.getoption("verbose")
if verbose > 1:
left_repr = safeformat(left)
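
To illustrate the mini formatting language described above (internal helper; the explanation string is contrived):

    from _pytest.assertion.util import format_explanation

    # "\n{" opens a "where ..." block and "\n}" closes it; other newlines stay escaped.
    out = format_explanation("assert x == y\n{x = compute()\n}")
    assert out.splitlines()[0] == "assert x == y"
    assert "where x = compute()" in out
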
diff --git a/src/_pytest/cacheprovider.py b/src/_pytest/cacheprovider.py
index de7ee9149..41c258271 100755
--- a/src/_pytest/cacheprovider.py
+++ b/src/_pytest/cacheprovider.py
@@ -1,9 +1,6 @@
-"""
-merged implementation of the cache provider
-
-the name cache was not chosen to ensure pluggy automatically
-ignores the external pytest-cache
-"""
+"""Implementation of the cache provider."""
+# This plugin was not named "cache" to avoid conflicts with the external
+# pytest-cache version.
import json
import os
from typing import Dict
@@ -73,7 +70,7 @@ class Cache:
@classmethod
def clear_cache(cls, cachedir: Path) -> None:
- """Clears the sub-directories used to hold cached directories and values."""
+ """Clear the sub-directories used to hold cached directories and values."""
for prefix in (cls._CACHE_PREFIX_DIRS, cls._CACHE_PREFIX_VALUES):
d = cachedir / prefix
if d.is_dir():
@@ -94,14 +91,16 @@ class Cache:
)
def makedir(self, name: str) -> py.path.local:
- """ return a directory path object with the given name. If the
- directory does not yet exist, it will be created. You can use it
- to manage files likes e. g. store/retrieve database
- dumps across test sessions.
-
- :param name: must be a string not containing a ``/`` separator.
- Make sure the name contains your plugin or application
- identifiers to prevent clashes with other cache users.
+ """Return a directory path object with the given name.
+
+ If the directory does not yet exist, it will be created. You can use
+ it to manage files to e.g. store/retrieve database dumps across test
+ sessions.
+
+ :param name:
+ Must be a string not containing a ``/`` separator.
+ Make sure the name contains your plugin or application
+ identifiers to prevent clashes with other cache users.
"""
path = Path(name)
if len(path.parts) > 1:
@@ -114,15 +113,16 @@ class Cache:
return self._cachedir.joinpath(self._CACHE_PREFIX_VALUES, Path(key))
def get(self, key: str, default):
- """ return cached value for the given key. If no value
- was yet cached or the value cannot be read, the specified
- default is returned.
+ """Return the cached value for the given key.
- :param key: must be a ``/`` separated value. Usually the first
- name is the name of your plugin or your application.
- :param default: must be provided in case of a cache-miss or
- invalid cache values.
+ If no value was yet cached or the value cannot be read, the specified
+ default is returned.
+ :param key:
+ Must be a ``/`` separated value. Usually the first
+ name is the name of your plugin or your application.
+ :param default:
+ The value to return in case of a cache-miss or invalid cache value.
"""
path = self._getvaluepath(key)
try:
@@ -132,13 +132,14 @@ class Cache:
return default
def set(self, key: str, value: object) -> None:
- """ save value for the given key.
-
- :param key: must be a ``/`` separated value. Usually the first
- name is the name of your plugin or your application.
- :param value: must be of any combination of basic
- python types, including nested types
- like e. g. lists of dictionaries.
+ """Save value for the given key.
+
+ :param key:
+ Must be a ``/`` separated value. Usually the first
+ name is the name of your plugin or your application.
+ :param value:
+ Must be of any combination of basic python types,
+ including nested types like lists of dictionaries.
"""
path = self._getvaluepath(key)
try:
@@ -241,7 +242,7 @@ class LFPluginCollSkipfiles:
class LFPlugin:
- """ Plugin which implements the --lf (run last-failing) option """
+ """Plugin which implements the --lf (run last-failing) option."""
def __init__(self, config: Config) -> None:
self.config = config
@@ -262,7 +263,7 @@ class LFPlugin:
)
def get_last_failed_paths(self) -> Set[Path]:
- """Returns a set with all Paths()s of the previously failed nodeids."""
+ """Return a set with all Paths()s of the previously failed nodeids."""
rootpath = Path(str(self.config.rootdir))
result = {rootpath / nodeid.split("::")[0] for nodeid in self.lastfailed}
return {x for x in result if x.exists()}
@@ -351,7 +352,7 @@ class LFPlugin:
class NFPlugin:
- """ Plugin which implements the --nf (run new-first) option """
+ """Plugin which implements the --nf (run new-first) option."""
def __init__(self, config: Config) -> None:
self.config = config
@@ -471,13 +472,12 @@ def pytest_configure(config: Config) -> None:
@pytest.fixture
def cache(request: FixtureRequest) -> Cache:
- """
- Return a cache object that can persist state between testing sessions.
+ """Return a cache object that can persist state between testing sessions.
cache.get(key, default)
cache.set(key, value)
- Keys must be a ``/`` separated value, where the first part is usually the
+ Keys must be ``/`` separated strings, where the first part is usually the
name of your plugin or application to avoid clashes with other cache users.
Values can be any object handled by the json stdlib module.
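
For example, the ``cache`` fixture documented above can round-trip any JSON-serializable value between sessions (the key prefix is hypothetical):

    def test_cache_roundtrip(cache):
        cache.set("myplugin/last_result", {"answer": 42})
        assert cache.get("myplugin/last_result", None) == {"answer": 42}
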
diff --git a/src/_pytest/capture.py b/src/_pytest/capture.py
index f538b67ec..90f5f9f3f 100644
--- a/src/_pytest/capture.py
+++ b/src/_pytest/capture.py
@@ -1,7 +1,4 @@
-"""
-per-test stdout/stderr capturing mechanism.
-
-"""
+"""Per-test stdout/stderr capturing mechanism."""
import collections
import contextlib
import io
@@ -49,8 +46,7 @@ def pytest_addoption(parser: Parser) -> None:
def _colorama_workaround() -> None:
- """
- Ensure colorama is imported so that it attaches to the correct stdio
+ """Ensure colorama is imported so that it attaches to the correct stdio
handles on Windows.
colorama uses the terminal on import time. So if something does the
@@ -65,8 +61,7 @@ def _colorama_workaround() -> None:
def _readline_workaround() -> None:
- """
- Ensure readline is imported so that it attaches to the correct stdio
+ """Ensure readline is imported so that it attaches to the correct stdio
handles on Windows.
Pdb uses readline support where available--when not running from the Python
@@ -80,7 +75,7 @@ def _readline_workaround() -> None:
workaround ensures that readline is imported before I/O capture is setup so
that it can attach to the actual stdin/out for the console.
- See https://github.com/pytest-dev/pytest/pull/1281
+ See https://github.com/pytest-dev/pytest/pull/1281.
"""
if sys.platform.startswith("win32"):
try:
@@ -90,8 +85,9 @@ def _readline_workaround() -> None:
def _py36_windowsconsoleio_workaround(stream: TextIO) -> None:
- """
- Python 3.6 implemented unicode console handling for Windows. This works
+ """Workaround for Windows Unicode console handling on Python>=3.6.
+
+ Python 3.6 implemented Unicode console handling for Windows. This works
by reading/writing to the raw console handle using
``{Read,Write}ConsoleW``.
@@ -106,10 +102,11 @@ def _py36_windowsconsoleio_workaround(stream: TextIO) -> None:
also means a different handle by replicating the logic in
"Py_lifecycle.c:initstdio/create_stdio".
- :param stream: in practice ``sys.stdout`` or ``sys.stderr``, but given
+ :param stream:
+ In practice ``sys.stdout`` or ``sys.stderr``, but given
here as parameter for unittesting purposes.
- See https://github.com/pytest-dev/py/issues/103
+ See https://github.com/pytest-dev/py/issues/103.
"""
if (
not sys.platform.startswith("win32")
@@ -118,7 +115,7 @@ def _py36_windowsconsoleio_workaround(stream: TextIO) -> None:
):
return
- # bail out if ``stream`` doesn't seem like a proper ``io`` stream (#2666)
+ # Bail out if ``stream`` doesn't seem like a proper ``io`` stream (#2666).
if not hasattr(stream, "buffer"):
return
@@ -158,10 +155,10 @@ def pytest_load_initial_conftests(early_config: Config):
capman = CaptureManager(ns.capture)
pluginmanager.register(capman, "capturemanager")
- # make sure that capturemanager is properly reset at final shutdown
+ # Make sure that capturemanager is properly reset at final shutdown.
early_config.add_cleanup(capman.stop_global_capturing)
- # finally trigger conftest loading but while capturing (issue93)
+ # Finally trigger conftest loading but while capturing (issue #93).
capman.start_global_capturing()
outcome = yield
capman.suspend_global_capture()
@@ -347,9 +344,9 @@ class SysCapture(SysCaptureBinary):
class FDCaptureBinary:
- """Capture IO to/from a given os-level filedescriptor.
+ """Capture IO to/from a given OS-level file descriptor.
- snap() produces `bytes`
+ snap() produces `bytes`.
"""
EMPTY_BUFFER = b""
@@ -415,7 +412,7 @@ class FDCaptureBinary:
)
def start(self) -> None:
- """ Start capturing on targetfd using memorized tmpfile. """
+ """Start capturing on targetfd using memorized tmpfile."""
self._assert_state("start", ("initialized",))
os.dup2(self.tmpfile.fileno(), self.targetfd)
self.syscapture.start()
@@ -430,8 +427,8 @@ class FDCaptureBinary:
return res
def done(self) -> None:
- """ stop capturing, restore streams, return original capture file,
- seeked to position zero. """
+ """Stop capturing, restore streams, return original capture file,
+ seeked to position zero."""
self._assert_state("done", ("initialized", "started", "suspended", "done"))
if self._state == "done":
return
@@ -462,15 +459,15 @@ class FDCaptureBinary:
self._state = "started"
def writeorg(self, data):
- """ write to original file descriptor. """
+ """Write to original file descriptor."""
self._assert_state("writeorg", ("started", "suspended"))
os.write(self.targetfd_save, data)
class FDCapture(FDCaptureBinary):
- """Capture IO to/from a given os-level filedescriptor.
+ """Capture IO to/from a given OS-level file descriptor.
- snap() produces text
+ snap() produces text.
"""
# Ignore type because it doesn't match the type in the superclass (bytes).
@@ -485,7 +482,7 @@ class FDCapture(FDCaptureBinary):
return res
def writeorg(self, data):
- """ write to original file descriptor. """
+ """Write to original file descriptor."""
super().writeorg(data.encode("utf-8")) # XXX use encoding of original stream
@@ -518,7 +515,7 @@ class MultiCapture:
self.err.start()
def pop_outerr_to_orig(self):
- """ pop current snapshot out/err capture and flush to orig streams. """
+ """Pop current snapshot out/err capture and flush to orig streams."""
out, err = self.readouterr()
if out:
self.out.writeorg(out)
@@ -547,7 +544,7 @@ class MultiCapture:
self._in_suspended = False
def stop_capturing(self) -> None:
- """ stop capturing and reset capturing streams """
+ """Stop capturing and reset capturing streams."""
if self._state == "stopped":
raise ValueError("was already stopped")
self._state = "stopped"
@@ -588,16 +585,22 @@ def _get_multicapture(method: "_CaptureMethod") -> MultiCapture:
class CaptureManager:
- """
- Capture plugin, manages that the appropriate capture method is enabled/disabled during collection and each
- test phase (setup, call, teardown). After each of those points, the captured output is obtained and
- attached to the collection/runtest report.
+ """The capture plugin.
+
+ Manages that the appropriate capture method is enabled/disabled during
+ collection and each test phase (setup, call, teardown). After each of
+ those points, the captured output is obtained and attached to the
+ collection/runtest report.
There are two levels of capture:
- * global: which is enabled by default and can be suppressed by the ``-s`` option. This is always enabled/disabled
- during collection and each test phase.
- * fixture: when a test function or one of its fixture depend on the ``capsys`` or ``capfd`` fixtures. In this
- case special handling is needed to ensure the fixtures take precedence over the global capture.
+
+ * global: enabled by default and can be suppressed by the ``-s``
+ option. This is always enabled/disabled during collection and each test
+ phase.
+
+ * fixture: when a test function or one of its fixtures depends on the
+ ``capsys`` or ``capfd`` fixtures. In this case special handling is
+ needed to ensure the fixtures take precedence over the global capture.
"""
def __init__(self, method: "_CaptureMethod") -> None:
@@ -673,14 +676,13 @@ class CaptureManager:
self._capture_fixture = None
def activate_fixture(self) -> None:
- """If the current item is using ``capsys`` or ``capfd``, activate them so they take precedence over
- the global capture.
- """
+ """If the current item is using ``capsys`` or ``capfd``, activate
+ them so they take precedence over the global capture."""
if self._capture_fixture:
self._capture_fixture._start()
def deactivate_fixture(self) -> None:
- """Deactivates the ``capsys`` or ``capfd`` fixture of this item, if any."""
+ """Deactivate the ``capsys`` or ``capfd`` fixture of this item, if any."""
if self._capture_fixture:
self._capture_fixture.close()
@@ -759,10 +761,8 @@ class CaptureManager:
class CaptureFixture:
- """
- Object returned by :py:func:`capsys`, :py:func:`capsysbinary`, :py:func:`capfd` and :py:func:`capfdbinary`
- fixtures.
- """
+ """Object returned by the :py:func:`capsys`, :py:func:`capsysbinary`,
+ :py:func:`capfd` and :py:func:`capfdbinary` fixtures."""
def __init__(self, captureclass, request: SubRequest) -> None:
self.captureclass = captureclass
@@ -787,9 +787,12 @@ class CaptureFixture:
self._capture = None
def readouterr(self):
- """Read and return the captured output so far, resetting the internal buffer.
+ """Read and return the captured output so far, resetting the internal
+ buffer.
- :return: captured content as a namedtuple with ``out`` and ``err`` string attributes
+ :returns:
+ The captured content as a namedtuple with ``out`` and ``err``
+ string attributes.
"""
captured_out, captured_err = self._captured_out, self._captured_err
if self._capture is not None:
@@ -801,18 +804,18 @@ class CaptureFixture:
return CaptureResult(captured_out, captured_err)
def _suspend(self) -> None:
- """Suspends this fixture's own capturing temporarily."""
+ """Suspend this fixture's own capturing temporarily."""
if self._capture is not None:
self._capture.suspend_capturing()
def _resume(self) -> None:
- """Resumes this fixture's own capturing temporarily."""
+ """Resume this fixture's own capturing temporarily."""
if self._capture is not None:
self._capture.resume_capturing()
@contextlib.contextmanager
def disabled(self) -> Generator[None, None, None]:
- """Temporarily disables capture while inside the 'with' block."""
+ """Temporarily disable capturing while inside the ``with`` block."""
capmanager = self.request.config.pluginmanager.getplugin("capturemanager")
with capmanager.global_and_fixture_disabled():
yield
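
A short example of the ``capsys`` fixture whose ``readouterr()`` and ``disabled()`` docstrings are touched above:

    def test_print_is_captured(capsys):
        print("hello")
        captured = capsys.readouterr()
        assert captured.out == "hello\n"
        assert captured.err == ""

        with capsys.disabled():
            print("this goes straight to the real stdout")
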
diff --git a/src/_pytest/compat.py b/src/_pytest/compat.py
index 4ff59e1fb..93232f1bf 100644
--- a/src/_pytest/compat.py
+++ b/src/_pytest/compat.py
@@ -1,6 +1,4 @@
-"""
-python version compatibility code
-"""
+"""Python version compatibility code."""
import enum
import functools
import inspect
@@ -73,8 +71,7 @@ if sys.version_info < (3, 6):
def fspath(p):
"""os.fspath replacement, useful to point out when we should replace it by the
- real function once we drop py35.
- """
+ real function once we drop py35."""
return str(p)
@@ -88,8 +85,7 @@ def is_generator(func: object) -> bool:
def iscoroutinefunction(func: object) -> bool:
- """
- Return True if func is a coroutine function (a function defined with async
+ """Return True if func is a coroutine function (a function defined with async
def syntax, and doesn't contain yield), or a function decorated with
@asyncio.coroutine.
@@ -101,7 +97,8 @@ def iscoroutinefunction(func: object) -> bool:
def is_async_function(func: object) -> bool:
- """Return True if the given function seems to be an async function or async generator"""
+ """Return True if the given function seems to be an async function or
+ an async generator."""
return iscoroutinefunction(func) or (
sys.version_info >= (3, 6) and inspect.isasyncgenfunction(func)
)
@@ -119,7 +116,7 @@ def getlocation(function, curdir=None) -> str:
def num_mock_patch_args(function) -> int:
- """ return number of arguments used up by mock arguments (if any) """
+ """Return number of arguments used up by mock arguments (if any)."""
patchings = getattr(function, "patchings", None)
if not patchings:
return 0
@@ -144,13 +141,13 @@ def getfuncargnames(
is_method: bool = False,
cls: Optional[type] = None
) -> Tuple[str, ...]:
- """Returns the names of a function's mandatory arguments.
+ """Return the names of a function's mandatory arguments.
- This should return the names of all function arguments that:
- * Aren't bound to an instance or type as in instance or class methods.
- * Don't have default values.
- * Aren't bound with functools.partial.
- * Aren't replaced with mocks.
+ Should return the names of all function arguments that:
+ * Aren't bound to an instance or type as in instance or class methods.
+ * Don't have default values.
+ * Aren't bound with functools.partial.
+ * Aren't replaced with mocks.
The is_method and cls arguments indicate that the function should
be treated as a bound method even though it's not unless, only in
@@ -212,8 +209,9 @@ else:
def get_default_arg_names(function: Callable[..., Any]) -> Tuple[str, ...]:
- # Note: this code intentionally mirrors the code at the beginning of getfuncargnames,
- # to get the arguments which were excluded from its result because they had default values
+ # Note: this code intentionally mirrors the code at the beginning of
+ # getfuncargnames, to get the arguments which were excluded from its result
+ # because they had default values.
return tuple(
p.name
for p in signature(function).parameters.values()
@@ -242,22 +240,21 @@ def _bytes_to_ascii(val: bytes) -> str:
def ascii_escaped(val: Union[bytes, str]) -> str:
- """If val is pure ascii, returns it as a str(). Otherwise, escapes
+ r"""If val is pure ASCII, return it as an str, otherwise, escape
bytes objects into a sequence of escaped bytes:
- b'\xc3\xb4\xc5\xd6' -> '\\xc3\\xb4\\xc5\\xd6'
+ b'\xc3\xb4\xc5\xd6' -> r'\xc3\xb4\xc5\xd6'
and escapes unicode objects into a sequence of escaped unicode
ids, e.g.:
- '4\\nV\\U00043efa\\x0eMXWB\\x1e\\u3028\\u15fd\\xcd\\U0007d944'
+ r'4\nV\U00043efa\x0eMXWB\x1e\u3028\u15fd\xcd\U0007d944'
- note:
- the obvious "v.decode('unicode-escape')" will return
- valid utf-8 unicode if it finds them in bytes, but we
+ Note:
+ The obvious "v.decode('unicode-escape')" will return
+ valid UTF-8 unicode if it finds them in bytes, but we
want to return escaped bytes for any byte, even if they match
- a utf-8 string.
-
+ a UTF-8 string.
"""
if isinstance(val, bytes):
ret = _bytes_to_ascii(val)
@@ -270,18 +267,17 @@ def ascii_escaped(val: Union[bytes, str]) -> str:
class _PytestWrapper:
"""Dummy wrapper around a function object for internal use only.
- Used to correctly unwrap the underlying function object
- when we are creating fixtures, because we wrap the function object ourselves with a decorator
- to issue warnings when the fixture function is called directly.
+ Used to correctly unwrap the underlying function object when we are
+ creating fixtures, because we wrap the function object ourselves with a
+ decorator to issue warnings when the fixture function is called directly.
"""
obj = attr.ib()
def get_real_func(obj):
- """ gets the real function object of the (possibly) wrapped object by
- functools.wraps or functools.partial.
- """
+ """Get the real function object of the (possibly) wrapped object by
+ functools.wraps or functools.partial."""
start_obj = obj
for i in range(100):
# __pytest_wrapped__ is set by @pytest.fixture when wrapping the fixture function
@@ -307,10 +303,9 @@ def get_real_func(obj):
def get_real_method(obj, holder):
- """
- Attempts to obtain the real function object that might be wrapping ``obj``, while at the same time
- returning a bound method to ``holder`` if the original object was a bound method.
- """
+ """Attempt to obtain the real function object that might be wrapping
+ ``obj``, while at the same time returning a bound method to ``holder`` if
+ the original object was a bound method."""
try:
is_method = hasattr(obj, "__func__")
obj = get_real_func(obj)
@@ -329,12 +324,13 @@ def getimfunc(func):
def safe_getattr(object: Any, name: str, default: Any) -> Any:
- """ Like getattr but return default upon any Exception or any OutcomeException.
+ """Like getattr but return default upon any Exception or any OutcomeException.
Attribute access can potentially fail for 'evil' Python objects.
See issue #214.
- It catches OutcomeException because of #2490 (issue #580), new outcomes are derived from BaseException
- instead of Exception (for more details check #2707)
+ It catches OutcomeException because of #2490 (issue #580), new outcomes
+ are derived from BaseException instead of Exception (for more details
+ check #2707).
"""
try:
return getattr(object, name, default)
@@ -427,7 +423,7 @@ else:
#
# With `assert_never` we can do better:
#
-# // throw new Error('unreachable');
+# // raise Exception('unreachable')
# return assert_never(x)
#
# Now, if we forget to handle the new variant, the type-checker will emit a
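To make the escaping rules in the ascii_escaped docstring above concrete, a minimal sketch of the documented behaviour (not pytest's exact implementation):

    def ascii_escaped_sketch(val):
        # Bytes: escape every non-ASCII byte, even if it forms valid UTF-8.
        if isinstance(val, bytes):
            return val.decode("ascii", "backslashreplace")
        # Text: escape non-ASCII code points as \uXXXX escape sequences.
        return val.encode("unicode_escape").decode("ascii")

    assert ascii_escaped_sketch(b"\xc3\xb4") == r"\xc3\xb4"
    assert ascii_escaped_sketch("4\nV\u3028") == r"4\nV\u3028"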
diff --git a/src/_pytest/config/__init__.py b/src/_pytest/config/__init__.py
index 188cccd1a..e0c463d2f 100644
--- a/src/_pytest/config/__init__.py
+++ b/src/_pytest/config/__init__.py
@@ -1,4 +1,4 @@
-""" command line options, ini-file and conftest.py processing. """
+"""Command line options, ini-file and conftest.py processing."""
import argparse
import collections.abc
import contextlib
@@ -34,7 +34,7 @@ from pluggy import PluginManager
import _pytest._code
import _pytest.deprecated
-import _pytest.hookspec # the extension point definitions
+import _pytest.hookspec
from .exceptions import PrintHelp as PrintHelp
from .exceptions import UsageError as UsageError
from .findpaths import determine_setup
@@ -61,9 +61,12 @@ if TYPE_CHECKING:
_PluggyPlugin = object
"""A type to represent plugin objects.
+
Plugins can be any namespace, so we can't narrow it down much, but we use an
alias to make the intent clear.
-Ideally this type would be provided by pluggy itself."""
+
+Ideally this type would be provided by pluggy itself.
+"""
hookimpl = HookimplMarker("pytest")
@@ -71,25 +74,24 @@ hookspec = HookspecMarker("pytest")
class ExitCode(enum.IntEnum):
- """
- .. versionadded:: 5.0
-
- Encodes the valid exit codes by pytest.
+ """Encodes the valid exit codes by pytest.
Currently users and plugins may supply other exit codes as well.
+
+ .. versionadded:: 5.0
"""
- #: tests passed
+ #: Tests passed.
OK = 0
- #: tests failed
+ #: Tests failed.
TESTS_FAILED = 1
- #: pytest was interrupted
+ #: pytest was interrupted.
INTERRUPTED = 2
- #: an internal error got in the way
+ #: An internal error got in the way.
INTERNAL_ERROR = 3
- #: pytest was misused
+ #: pytest was misused.
USAGE_ERROR = 4
- #: pytest couldn't find tests
+ #: pytest couldn't find tests.
NO_TESTS_COLLECTED = 5
@@ -112,7 +114,7 @@ class ConftestImportFailure(Exception):
def filter_traceback_for_conftest_import_failure(
entry: _pytest._code.TracebackEntry,
) -> bool:
- """filters tracebacks entries which point to pytest internals or importlib.
+ """Filter tracebacks entries which point to pytest internals or importlib.
Make a special case for importlib because we use it to import test modules and conftest files
in _pytest.pathlib.import_path.
@@ -124,12 +126,12 @@ def main(
args: Optional[List[str]] = None,
plugins: Optional[Sequence[Union[str, _PluggyPlugin]]] = None,
) -> Union[int, ExitCode]:
- """ return exit code, after performing an in-process test run.
+ """Perform an in-process test run.
- :arg args: list of command line arguments.
+ :param args: List of command line arguments.
+ :param plugins: List of plugin objects to be auto-registered during initialization.
- :arg plugins: list of plugin objects to be auto-registered during
- initialization.
+ :returns: An exit code.
"""
try:
try:
@@ -171,7 +173,7 @@ def main(
def console_main() -> int:
- """pytest's CLI entry point.
+ """The CLI entry point of pytest.
This function is not meant for programmable use; use `main()` instead.
"""
@@ -193,10 +195,10 @@ class cmdline: # compatibility namespace
def filename_arg(path: str, optname: str) -> str:
- """ Argparse type validator for filename arguments.
+ """Argparse type validator for filename arguments.
- :path: path of filename
- :optname: name of the option
+ :path: Path of filename.
+ :optname: Name of the option.
"""
if os.path.isdir(path):
raise UsageError("{} must be a filename, given: {}".format(optname, path))
@@ -206,8 +208,8 @@ def filename_arg(path: str, optname: str) -> str:
def directory_arg(path: str, optname: str) -> str:
"""Argparse type validator for directory arguments.
- :path: path of directory
- :optname: name of the option
+ :path: Path of directory.
+ :optname: Name of the option.
"""
if not os.path.isdir(path):
raise UsageError("{} must be a directory, given: {}".format(optname, path))
@@ -278,8 +280,7 @@ def get_config(
def get_plugin_manager() -> "PytestPluginManager":
- """
- Obtain a new instance of the
+ """Obtain a new instance of the
:py:class:`_pytest.config.PytestPluginManager`, with default plugins
already loaded.
@@ -320,13 +321,12 @@ def _prepareconfig(
class PytestPluginManager(PluginManager):
- """
- Overwrites :py:class:`pluggy.PluginManager <pluggy.PluginManager>` to add pytest-specific
- functionality:
+ """A :py:class:`pluggy.PluginManager <pluggy.PluginManager>` with
+ additional pytest-specific functionality:
- * loading plugins from the command line, ``PYTEST_PLUGINS`` env variable and
- ``pytest_plugins`` global variables found in plugins being loaded;
- * ``conftest.py`` loading during start-up;
+ * Loading plugins from the command line, ``PYTEST_PLUGINS`` env variable and
+ ``pytest_plugins`` global variables found in plugins being loaded.
+ * ``conftest.py`` loading during start-up.
"""
def __init__(self) -> None:
@@ -359,27 +359,27 @@ class PytestPluginManager(PluginManager):
# Config._consider_importhook will set a real object if required.
self.rewrite_hook = _pytest.assertion.DummyRewriteHook()
- # Used to know when we are importing conftests after the pytest_configure stage
+ # Used to know when we are importing conftests after the pytest_configure stage.
self._configured = False
def parse_hookimpl_opts(self, plugin: _PluggyPlugin, name: str):
- # pytest hooks are always prefixed with pytest_
+ # pytest hooks are always prefixed with "pytest_",
# so we avoid accessing possibly non-readable attributes
- # (see issue #1073)
+ # (see issue #1073).
if not name.startswith("pytest_"):
return
- # ignore names which can not be hooks
+ # Ignore names which can not be hooks.
if name == "pytest_plugins":
return
method = getattr(plugin, name)
opts = super().parse_hookimpl_opts(plugin, name)
- # consider only actual functions for hooks (#3775)
+ # Consider only actual functions for hooks (#3775).
if not inspect.isroutine(method):
return
- # collect unmarked hooks as long as they have the `pytest_' prefix
+ # Collect unmarked hooks as long as they have the `pytest_' prefix.
if opts is None and name.startswith("pytest_"):
opts = {}
if opts is not None:
@@ -432,17 +432,18 @@ class PytestPluginManager(PluginManager):
return ret
def getplugin(self, name: str):
- # support deprecated naming because plugins (xdist e.g.) use it
+ # Support deprecated naming because plugins (xdist e.g.) use it.
plugin = self.get_plugin(name) # type: Optional[_PluggyPlugin]
return plugin
def hasplugin(self, name: str) -> bool:
- """Return True if the plugin with the given name is registered."""
+ """Return whether a plugin with the given name is registered."""
return bool(self.get_plugin(name))
def pytest_configure(self, config: "Config") -> None:
+ """:meta private:"""
# XXX now that the pluginmanager exposes hookimpl(tryfirst...)
- # we should remove tryfirst/trylast as markers
+ # we should remove tryfirst/trylast as markers.
config.addinivalue_line(
"markers",
"tryfirst: mark a hook implementation function such that the "
@@ -456,15 +457,15 @@ class PytestPluginManager(PluginManager):
self._configured = True
#
- # internal API for local conftest plugin handling
+ # Internal API for local conftest plugin handling.
#
def _set_initial_conftests(self, namespace: argparse.Namespace) -> None:
- """ load initial conftest files given a preparsed "namespace".
- As conftest files may add their own command line options
- which have arguments ('--my-opt somepath') we might get some
- false positives. All builtin and 3rd party plugins will have
- been loaded, however, so common options will not confuse our logic
- here.
+ """Load initial conftest files given a preparsed "namespace".
+
+ As conftest files may add their own command line options which have
+ arguments ('--my-opt somepath') we might get some false positives.
+ All builtin and 3rd party plugins will have been loaded, however, so
+ common options will not confuse our logic here.
"""
current = py.path.local()
self._confcutdir = (
@@ -513,7 +514,7 @@ class PytestPluginManager(PluginManager):
# XXX these days we may rather want to use config.rootdir
# and allow users to opt into looking into the rootdir parent
- # directories instead of requiring to specify confcutdir
+ # directories instead of requiring to specify confcutdir.
clist = []
for parent in directory.parts():
if self._confcutdir and self._confcutdir.relto(parent):
@@ -539,8 +540,8 @@ class PytestPluginManager(PluginManager):
def _importconftest(
self, conftestpath: py.path.local, importmode: Union[str, ImportMode],
) -> types.ModuleType:
- # Use a resolved Path object as key to avoid loading the same conftest twice
- # with build systems that create build directories containing
+ # Use a resolved Path object as key to avoid loading the same conftest
+ # twice with build systems that create build directories containing
# symlinks to actual files.
# Using Path().resolve() is better than py.path.realpath because
# it resolves to the correct path/drive in case-insensitive file systems (#5792)
@@ -627,7 +628,7 @@ class PytestPluginManager(PluginManager):
if name in essential_plugins:
raise UsageError("plugin %s cannot be disabled" % name)
- # PR #4304 : remove stepwise if cacheprovider is blocked
+ # PR #4304: remove stepwise if cacheprovider is blocked.
if name == "cacheprovider":
self.set_blocked("stepwise")
self.set_blocked("pytest_stepwise")
@@ -663,11 +664,12 @@ class PytestPluginManager(PluginManager):
self.import_plugin(import_spec)
def import_plugin(self, modname: str, consider_entry_points: bool = False) -> None:
+ """Import a plugin with ``modname``.
+
+ If ``consider_entry_points`` is True, entry point names are also
+ considered to find a plugin.
"""
- Imports a plugin with ``modname``. If ``consider_entry_points`` is True, entry point
- names are also considered to find a plugin.
- """
- # most often modname refers to builtin modules, e.g. "pytester",
+ # Most often modname refers to builtin modules, e.g. "pytester",
# "terminal" or "capture". Those plugins are registered under their
# basename for historic purposes but must be imported with the
# _pytest prefix.
@@ -743,10 +745,11 @@ notset = Notset()
def _iter_rewritable_modules(package_files: Iterable[str]) -> Iterator[str]:
- """
- Given an iterable of file names in a source distribution, return the "names" that should
- be marked for assertion rewrite (for example the package "pytest_mock/__init__.py" should
- be added as "pytest_mock" in the assertion rewrite mechanism.
+ """Given an iterable of file names in a source distribution, return the "names" that should
+ be marked for assertion rewrite.
+
+ For example the package "pytest_mock/__init__.py" should be added as "pytest_mock" in
+ the assertion rewrite mechanism.
This function has to deal with dist-info based distributions and egg based distributions
(which are still very much in use for "editable" installs).
@@ -790,11 +793,11 @@ def _iter_rewritable_modules(package_files: Iterable[str]) -> Iterator[str]:
yield package_name
if not seen_some:
- # at this point we did not find any packages or modules suitable for assertion
+ # At this point we did not find any packages or modules suitable for assertion
# rewriting, so we try again by stripping the first path component (to account for
- # "src" based source trees for example)
- # this approach lets us have the common case continue to be fast, as egg-distributions
- # are rarer
+ # "src" based source trees for example).
+ # This approach lets us have the common case continue to be fast, as egg-distributions
+ # are rarer.
new_package_files = []
for fn in package_files:
parts = fn.split("/")
@@ -810,8 +813,7 @@ def _args_converter(args: Iterable[str]) -> Tuple[str, ...]:
class Config:
- """
- Access to configuration values, pluginmanager and plugin hooks.
+ """Access to configuration values, pluginmanager and plugin hooks.
:param PytestPluginManager pluginmanager:
@@ -837,11 +839,11 @@ class Config:
"""
args = attr.ib(type=Tuple[str, ...], converter=_args_converter)
- """tuple of command-line arguments as passed to ``pytest.main()``."""
+ """Tuple of command-line arguments as passed to ``pytest.main()``."""
plugins = attr.ib(type=Optional[Sequence[Union[str, _PluggyPlugin]]])
- """list of extra plugins, might be `None`."""
+ """List of extra plugins, might be `None`."""
dir = attr.ib(type=Path)
- """directory where ``pytest.main()`` was invoked from."""
+ """Directory from which ``pytest.main()`` was invoked."""
def __init__(
self,
@@ -857,9 +859,10 @@ class Config:
)
self.option = argparse.Namespace()
- """access to command line option as attributes.
+ """Access to command line option as attributes.
- :type: argparse.Namespace"""
+ :type: argparse.Namespace
+ """
self.invocation_params = invocation_params
@@ -869,9 +872,10 @@ class Config:
processopt=self._processopt,
)
self.pluginmanager = pluginmanager
- """the plugin manager handles plugin registration and hook invocation.
+ """The plugin manager handles plugin registration and hook invocation.
- :type: PytestPluginManager"""
+ :type: PytestPluginManager
+ """
self.trace = self.pluginmanager.trace.root.get("config")
self.hook = self.pluginmanager.hook
@@ -895,11 +899,11 @@ class Config:
@property
def invocation_dir(self) -> py.path.local:
- """Backward compatibility"""
+ """Backward compatibility."""
return py.path.local(str(self.invocation_params.dir))
def add_cleanup(self, func: Callable[[], None]) -> None:
- """ Add a function to be called when the config object gets out of
+ """Add a function to be called when the config object gets out of
use (usually coinciding with pytest_unconfigure)."""
self._cleanup.append(func)
@@ -970,7 +974,7 @@ class Config:
sys.stderr.flush()
def cwd_relative_nodeid(self, nodeid: str) -> str:
- # nodeid's are relative to the rootpath, compute relative to cwd
+ # nodeid's are relative to the rootpath, compute relative to cwd.
if self.invocation_dir != self.rootdir:
fullpath = self.rootdir.join(nodeid)
nodeid = self.invocation_dir.bestrelpath(fullpath)
@@ -978,7 +982,7 @@ class Config:
@classmethod
def fromdictargs(cls, option_dict, args) -> "Config":
- """ constructor usable for subprocesses. """
+ """Constructor usable for subprocesses."""
config = get_config(args)
config.option.__dict__.update(option_dict)
config.parse(args, addopts=False)
@@ -1041,11 +1045,9 @@ class Config:
self._warn_about_missing_assertion(mode)
def _mark_plugins_for_rewrite(self, hook) -> None:
- """
- Given an importhook, mark for rewrite any top-level
+ """Given an importhook, mark for rewrite any top-level
modules or packages in the distribution package for
- all pytest plugins.
- """
+ all pytest plugins."""
self.pluginmanager.rewrite_hook = hook
if os.environ.get("PYTEST_DISABLE_PLUGIN_AUTOLOAD"):
@@ -1194,7 +1196,7 @@ class Config:
return [name for name in self.inicfg if name not in parser_inicfg]
def parse(self, args: List[str], addopts: bool = True) -> None:
- # parse given cmdline arguments into this config object.
+ # Parse given cmdline arguments into this config object.
assert not hasattr(
self, "args"
), "can only parse cmdline args at most once per Config object"
@@ -1219,18 +1221,20 @@ class Config:
pass
def addinivalue_line(self, name: str, line: str) -> None:
- """ add a line to an ini-file option. The option must have been
- declared but might not yet be set in which case the line becomes the
- the first line in its value. """
+ """Add a line to an ini-file option. The option must have been
+ declared but might not yet be set, in which case the line becomes
+ the first line in its value."""
x = self.getini(name)
assert isinstance(x, list)
x.append(line) # modifies the cached list inline
def getini(self, name: str):
- """ return configuration value from an :ref:`ini file <configfiles>`. If the
- specified name hasn't been registered through a prior
+ """Return configuration value from an :ref:`ini file <configfiles>`.
+
+ If the specified name hasn't been registered through a prior
:py:func:`parser.addini <_pytest.config.argparsing.Parser.addini>`
- call (usually from a plugin), a ValueError is raised. """
+ call (usually from a plugin), a ValueError is raised.
+ """
try:
return self._inicache[name]
except KeyError:
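A brief usage sketch for the two Config methods above, as they might appear in a plugin; the "demo_markers" ini name is invented and assumes a matching parser.addini() registration (see the conftest sketch later in the argparsing section):

    def pytest_configure(config):
        # Read a linelist ini option and append each entry to the built-in
        # "markers" option via the APIs documented above.
        for line in config.getini("demo_markers"):
            config.addinivalue_line("markers", line)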
@@ -1254,19 +1258,20 @@ class Config:
return []
else:
value = override_value
- # coerce the values based on types
- # note: some coercions are only required if we are reading from .ini files, because
+ # Coerce the values based on types.
+ #
+ # Note: some coercions are only required if we are reading from .ini files, because
# the file format doesn't contain type information, but when reading from toml we will
# get either str or list of str values (see _parse_ini_config_from_pyproject_toml).
- # for example:
+ # For example:
#
# ini:
# a_line_list = "tests acceptance"
- # in this case, we need to split the string to obtain a list of strings
+ # in this case, we need to split the string to obtain a list of strings.
#
# toml:
# a_line_list = ["tests", "acceptance"]
- # in this case, we already have a list ready to use
+ # in this case, we already have a list ready to use.
#
if type == "pathlist":
# TODO: This assert is probably not valid in all cases.
@@ -1307,9 +1312,9 @@ class Config:
def _get_override_ini_value(self, name: str) -> Optional[str]:
value = None
- # override_ini is a list of "ini=value" options
- # always use the last item if multiple values are set for same ini-name,
- # e.g. -o foo=bar1 -o foo=bar2 will set foo to bar2
+ # override_ini is a list of "ini=value" options.
+ # Always use the last item if multiple values are set for same ini-name,
+ # e.g. -o foo=bar1 -o foo=bar2 will set foo to bar2.
for ini_config in self._override_ini:
try:
key, user_ini_value = ini_config.split("=", 1)
@@ -1325,12 +1330,12 @@ class Config:
return value
def getoption(self, name: str, default=notset, skip: bool = False):
- """ return command line option value.
+ """Return command line option value.
- :arg name: name of the option. You may also specify
+ :param name: Name of the option. You may also specify
the literal ``--OPT`` option instead of the "dest" option name.
- :arg default: default value if no option of that name exists.
- :arg skip: if True raise pytest.skip if option does not exists
+ :param default: Default value if no option of that name exists.
+ :param skip: If True, raise pytest.skip if the option does not exist
or has a None value.
"""
name = self._opt2dest.get(name, name)
@@ -1349,11 +1354,11 @@ class Config:
raise ValueError("no option named {!r}".format(name)) from e
def getvalue(self, name: str, path=None):
- """ (deprecated, use getoption()) """
+ """Deprecated, use getoption() instead."""
return self.getoption(name)
def getvalueorskip(self, name: str, path=None):
- """ (deprecated, use getoption(skip=True)) """
+ """Deprecated, use getoption(skip=True) instead."""
return self.getoption(name, skip=True)
def _warn_about_missing_assertion(self, mode: str) -> None:
@@ -1392,10 +1397,13 @@ def create_terminal_writer(
config: Config, file: Optional[TextIO] = None
) -> TerminalWriter:
"""Create a TerminalWriter instance configured according to the options
- in the config object. Every code which requires a TerminalWriter object
- and has access to a config object should use this function.
+ in the config object.
+
+ Any code which requires a TerminalWriter object and has access to a
+ config object should use this function.
"""
tw = TerminalWriter(file=file)
+
if config.option.color == "yes":
tw.hasmarkup = True
elif config.option.color == "no":
@@ -1405,6 +1413,7 @@ def create_terminal_writer(
tw.code_highlight = True
elif config.option.code_highlight == "no":
tw.code_highlight = False
+
return tw
@@ -1415,7 +1424,7 @@ def _strtobool(val: str) -> bool:
are 'n', 'no', 'f', 'false', 'off', and '0'. Raises ValueError if
'val' is anything else.
- .. note:: copied from distutils.util
+ .. note:: Copied from distutils.util.
"""
val = val.lower()
if val in ("y", "yes", "t", "true", "on", "1"):
diff --git a/src/_pytest/config/argparsing.py b/src/_pytest/config/argparsing.py
index 084ce16e5..6c6feff42 100644
--- a/src/_pytest/config/argparsing.py
+++ b/src/_pytest/config/argparsing.py
@@ -27,9 +27,9 @@ FILE_OR_DIR = "file_or_dir"
class Parser:
- """ Parser for command line arguments and ini-file values.
+ """Parser for command line arguments and ini-file values.
- :ivar extra_info: dict of generic param -> value to display in case
+ :ivar extra_info: Dict of generic param -> value to display in case
there's an error processing the command line arguments.
"""
@@ -56,11 +56,11 @@ class Parser:
def getgroup(
self, name: str, description: str = "", after: Optional[str] = None
) -> "OptionGroup":
- """ get (or create) a named option Group.
+ """Get (or create) a named option Group.
- :name: name of the option group.
- :description: long description for --help output.
- :after: name of other group, used for ordering --help output.
+ :name: Name of the option group.
+ :description: Long description for --help output.
+ :after: Name of another group, used for ordering --help output.
The returned group object has an ``addoption`` method with the same
signature as :py:func:`parser.addoption
@@ -79,15 +79,14 @@ class Parser:
return group
def addoption(self, *opts: str, **attrs: Any) -> None:
- """ register a command line option.
+ """Register a command line option.
- :opts: option names, can be short or long options.
- :attrs: same attributes which the ``add_argument()`` function of the
- `argparse library
- <https://docs.python.org/library/argparse.html>`_
+ :opts: Option names, can be short or long options.
+ :attrs: Same attributes which the ``add_argument()`` function of the
+ `argparse library <https://docs.python.org/library/argparse.html>`_
accepts.
- After command line parsing options are available on the pytest config
+ After command line parsing, options are available on the pytest config
object via ``config.option.NAME`` where ``NAME`` is usually set
by passing a ``dest`` attribute, for example
``addoption("--long", dest="NAME", ...)``.
@@ -141,9 +140,7 @@ class Parser:
args: Sequence[Union[str, py.path.local]],
namespace: Optional[argparse.Namespace] = None,
) -> argparse.Namespace:
- """parses and returns a namespace object with known arguments at this
- point.
- """
+ """Parse and return a namespace object with known arguments at this point."""
return self.parse_known_and_unknown_args(args, namespace=namespace)[0]
def parse_known_and_unknown_args(
@@ -151,9 +148,8 @@ class Parser:
args: Sequence[Union[str, py.path.local]],
namespace: Optional[argparse.Namespace] = None,
) -> Tuple[argparse.Namespace, List[str]]:
- """parses and returns a namespace object with known arguments, and
- the remaining arguments unknown at this point.
- """
+ """Parse and return a namespace object with known arguments, and
+ the remaining arguments unknown at this point."""
optparser = self._getparser()
strargs = [str(x) if isinstance(x, py.path.local) else x for x in args]
return optparser.parse_known_args(strargs, namespace=namespace)
@@ -165,12 +161,12 @@ class Parser:
type: Optional["Literal['pathlist', 'args', 'linelist', 'bool']"] = None,
default=None,
) -> None:
- """ register an ini-file option.
+ """Register an ini-file option.
- :name: name of the ini-variable
- :type: type of the variable, can be ``pathlist``, ``args``, ``linelist``
+ :name: Name of the ini-variable.
+ :type: Type of the variable, can be ``pathlist``, ``args``, ``linelist``
or ``bool``.
- :default: default value if no ini-file option exists but is queried.
+ :default: Default value if no ini-file option exists but is queried.
The value of ini-variables can be retrieved via a call to
:py:func:`config.getini(name) <_pytest.config.Config.getini>`.
@@ -181,10 +177,8 @@ class Parser:
class ArgumentError(Exception):
- """
- Raised if an Argument instance is created with invalid or
- inconsistent arguments.
- """
+ """Raised if an Argument instance is created with invalid or
+ inconsistent arguments."""
def __init__(self, msg: str, option: Union["Argument", str]) -> None:
self.msg = msg
@@ -198,17 +192,18 @@ class ArgumentError(Exception):
class Argument:
- """class that mimics the necessary behaviour of optparse.Option
+ """Class that mimics the necessary behaviour of optparse.Option.
+
+ It's currently a least-effort implementation that ignores choices
+ and integer prefixes.
- it's currently a least effort implementation
- and ignoring choices and integer prefixes
https://docs.python.org/3/library/optparse.html#optparse-standard-option-types
"""
_typ_map = {"int": int, "string": str, "float": float, "complex": complex}
def __init__(self, *names: str, **attrs: Any) -> None:
- """store parms in private vars for use in add_argument"""
+ """Store parms in private vars for use in add_argument."""
self._attrs = attrs
self._short_opts = [] # type: List[str]
self._long_opts = [] # type: List[str]
@@ -224,7 +219,7 @@ class Argument:
except KeyError:
pass
else:
- # this might raise a keyerror as well, don't want to catch that
+ # This might raise a KeyError as well; we don't want to catch that.
if isinstance(typ, str):
if typ == "choice":
warnings.warn(
@@ -247,12 +242,12 @@ class Argument:
stacklevel=4,
)
attrs["type"] = Argument._typ_map[typ]
- # used in test_parseopt -> test_parse_defaultgetter
+ # Used in test_parseopt -> test_parse_defaultgetter.
self.type = attrs["type"]
else:
self.type = typ
try:
- # attribute existence is tested in Config._processopt
+ # Attribute existence is tested in Config._processopt.
self.default = attrs["default"]
except KeyError:
pass
@@ -273,7 +268,7 @@ class Argument:
return self._short_opts + self._long_opts
def attrs(self) -> Mapping[str, Any]:
- # update any attributes set by processopt
+ # Update any attributes set by processopt.
attrs = "default dest help".split()
attrs.append(self.dest)
for attr in attrs:
@@ -289,9 +284,10 @@ class Argument:
return self._attrs
def _set_opt_strings(self, opts: Sequence[str]) -> None:
- """directly from optparse
+ """Directly from optparse.
- might not be necessary as this is passed to argparse later on"""
+ Might not be necessary as this is passed to argparse later on.
+ """
for opt in opts:
if len(opt) < 2:
raise ArgumentError(
@@ -340,12 +336,12 @@ class OptionGroup:
self.parser = parser
def addoption(self, *optnames: str, **attrs: Any) -> None:
- """ add an option to this group.
+ """Add an option to this group.
- if a shortened version of a long option is specified it will
+ If a shortened version of a long option is specified, it will
be suppressed in the help. addoption('--twowords', '--two-words')
results in help showing '--two-words' only, but --twowords gets
- accepted **and** the automatic destination is in args.twowords
+ accepted **and** the automatic destination is in args.twowords.
"""
conflict = set(optnames).intersection(
name for opt in self.options for name in opt.names()
@@ -386,7 +382,7 @@ class MyOptionParser(argparse.ArgumentParser):
allow_abbrev=False,
)
# extra_info is a dict of (param -> value) to display if there's
- # an usage error to provide more contextual information to the user
+ # a usage error to provide more contextual information to the user.
self.extra_info = extra_info if extra_info else {}
def error(self, message: str) -> "NoReturn":
@@ -405,7 +401,7 @@ class MyOptionParser(argparse.ArgumentParser):
args: Optional[Sequence[str]] = None,
namespace: Optional[argparse.Namespace] = None,
) -> argparse.Namespace:
- """allow splitting of positional arguments"""
+ """Allow splitting of positional arguments."""
parsed, unrecognized = self.parse_known_args(args, namespace)
if unrecognized:
for arg in unrecognized:
@@ -457,15 +453,15 @@ class MyOptionParser(argparse.ArgumentParser):
class DropShorterLongHelpFormatter(argparse.HelpFormatter):
- """shorten help for long options that differ only in extra hyphens
+ """Shorten help for long options that differ only in extra hyphens.
- - collapse **long** options that are the same except for extra hyphens
- - shortcut if there are only two options and one of them is a short one
- - cache result on action object as this is called at least 2 times
+ - Collapse **long** options that are the same except for extra hyphens.
+ - Shortcut if there are only two options and one of them is a short one.
+ - Cache result on the action object as this is called at least 2 times.
"""
def __init__(self, *args: Any, **kwargs: Any) -> None:
- """Use more accurate terminal width via pylib."""
+ # Use more accurate terminal width.
if "width" not in kwargs:
kwargs["width"] = _pytest._io.get_terminal_width()
super().__init__(*args, **kwargs)
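A compact conftest.py sketch exercising the Parser and OptionGroup APIs touched above; every option and ini name here is invented for illustration:

    def pytest_addoption(parser):
        group = parser.getgroup("demo", "demo reporting")
        group.addoption(
            "--demo-flag",
            action="store_true",
            dest="demo_flag",
            help="enable the demo behaviour",
        )
        parser.addini(
            "demo_markers",
            help="extra marker lines to register",
            type="linelist",
            default=[],
        )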
diff --git a/src/_pytest/config/exceptions.py b/src/_pytest/config/exceptions.py
index 19fe5cb08..95c412734 100644
--- a/src/_pytest/config/exceptions.py
+++ b/src/_pytest/config/exceptions.py
@@ -1,9 +1,7 @@
class UsageError(Exception):
- """ error in pytest usage or invocation"""
+ """Error in pytest usage or invocation."""
class PrintHelp(Exception):
- """Raised when pytest should print it's help to skip the rest of the
+ """Raised when pytest should print its help to skip the rest of the
argument parsing and validation."""
-
- pass
diff --git a/src/_pytest/config/findpaths.py b/src/_pytest/config/findpaths.py
index 08a71122d..dcd0be9ed 100644
--- a/src/_pytest/config/findpaths.py
+++ b/src/_pytest/config/findpaths.py
@@ -18,10 +18,10 @@ if TYPE_CHECKING:
def _parse_ini_config(path: py.path.local) -> iniconfig.IniConfig:
- """Parses the given generic '.ini' file using legacy IniConfig parser, returning
+ """Parse the given generic '.ini' file using legacy IniConfig parser, returning
the parsed object.
- Raises UsageError if the file cannot be parsed.
+ Raise UsageError if the file cannot be parsed.
"""
try:
return iniconfig.IniConfig(path)
@@ -32,23 +32,23 @@ def _parse_ini_config(path: py.path.local) -> iniconfig.IniConfig:
def load_config_dict_from_file(
filepath: py.path.local,
) -> Optional[Dict[str, Union[str, List[str]]]]:
- """Loads pytest configuration from the given file path, if supported.
+ """Load pytest configuration from the given file path, if supported.
Return None if the file does not contain valid pytest configuration.
"""
- # configuration from ini files are obtained from the [pytest] section, if present.
+ # Configuration from ini files is obtained from the [pytest] section, if present.
if filepath.ext == ".ini":
iniconfig = _parse_ini_config(filepath)
if "pytest" in iniconfig:
return dict(iniconfig["pytest"].items())
else:
- # "pytest.ini" files are always the source of configuration, even if empty
+ # "pytest.ini" files are always the source of configuration, even if empty.
if filepath.basename == "pytest.ini":
return {}
- # '.cfg' files are considered if they contain a "[tool:pytest]" section
+ # '.cfg' files are considered if they contain a "[tool:pytest]" section.
elif filepath.ext == ".cfg":
iniconfig = _parse_ini_config(filepath)
@@ -59,7 +59,7 @@ def load_config_dict_from_file(
# plain "[pytest]" sections in setup.cfg files is no longer supported (#3086).
fail(CFG_PYTEST_SECTION.format(filename="setup.cfg"), pytrace=False)
- # '.toml' files are considered if they contain a [tool.pytest.ini_options] table
+ # '.toml' files are considered if they contain a [tool.pytest.ini_options] table.
elif filepath.ext == ".toml":
import toml
@@ -83,10 +83,8 @@ def locate_config(
) -> Tuple[
Optional[py.path.local], Optional[py.path.local], Dict[str, Union[str, List[str]]],
]:
- """
- Search in the list of arguments for a valid ini-file for pytest,
- and return a tuple of (rootdir, inifile, cfg-dict).
- """
+ """Search in the list of arguments for a valid ini-file for pytest,
+ and return a tuple of (rootdir, inifile, cfg-dict)."""
config_names = [
"pytest.ini",
"pyproject.toml",
diff --git a/src/_pytest/debugging.py b/src/_pytest/debugging.py
index 3677d3bf9..5dda4b8d7 100644
--- a/src/_pytest/debugging.py
+++ b/src/_pytest/debugging.py
@@ -1,4 +1,4 @@
-""" interactive debugging with PDB, the Python Debugger. """
+"""Interactive debugging with PDB, the Python Debugger."""
import argparse
import functools
import sys
@@ -87,7 +87,7 @@ def pytest_configure(config: Config) -> None:
class pytestPDB:
- """ Pseudo PDB that defers to the real pdb. """
+ """Pseudo PDB that defers to the real pdb."""
_pluginmanager = None # type: PytestPluginManager
_config = None # type: Config
@@ -226,7 +226,7 @@ class pytestPDB:
@classmethod
def _init_pdb(cls, method, *args, **kwargs):
- """ Initialize PDB debugging, dropping any IO capturing. """
+ """Initialize PDB debugging, dropping any IO capturing."""
import _pytest.config
if cls._pluginmanager is not None:
@@ -298,16 +298,16 @@ class PdbTrace:
def wrap_pytest_function_for_tracing(pyfuncitem):
- """Changes the python function object of the given Function item by a wrapper which actually
- enters pdb before calling the python function itself, effectively leaving the user
- in the pdb prompt in the first statement of the function.
- """
+ """Change the Python function object of the given Function item by a
+ wrapper which actually enters pdb before calling the python function
+ itself, effectively leaving the user in the pdb prompt in the first
+ statement of the function."""
_pdb = pytestPDB._init_pdb("runcall")
testfunction = pyfuncitem.obj
# we can't just return `partial(pdb.runcall, testfunction)` because (on
# python < 3.7.4) runcall's first param is `func`, which means we'd get
- # an exception if one of the kwargs to testfunction was called `func`
+ # an exception if one of the kwargs to testfunction was called `func`.
@functools.wraps(testfunction)
def wrapper(*args, **kwargs):
func = functools.partial(testfunction, *args, **kwargs)
@@ -318,7 +318,7 @@ def wrap_pytest_function_for_tracing(pyfuncitem):
def maybe_wrap_pytest_function_for_tracing(pyfuncitem):
"""Wrap the given pytestfunct item for tracing support if --trace was given in
- the command line"""
+ the command line."""
if pyfuncitem.config.getvalue("trace"):
wrap_pytest_function_for_tracing(pyfuncitem)
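The wrapping described above can be pictured with this simplified standalone sketch; it uses the stdlib pdb directly rather than pytest's capture-aware pytestPDB class:

    import functools
    import pdb

    def wrap_for_tracing_sketch(testfunction):
        @functools.wraps(testfunction)
        def wrapper(*args, **kwargs):
            # Bind the arguments first, then let the debugger call the
            # function so the prompt opens on its first statement.
            func = functools.partial(testfunction, *args, **kwargs)
            pdb.Pdb().runcall(func)
        return wrapper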
diff --git a/src/_pytest/deprecated.py b/src/_pytest/deprecated.py
index 868318a2b..bd2574ba7 100644
--- a/src/_pytest/deprecated.py
+++ b/src/_pytest/deprecated.py
@@ -1,6 +1,5 @@
-"""
-This module contains deprecation messages and bits of code used elsewhere in the codebase
-that is planned to be removed in the next pytest release.
+"""Deprecation messages and bits of code used elsewhere in the codebase that
+are planned to be removed in the next pytest release.
Keeping it in a central location makes it easy to track what is deprecated and should
be removed when the time comes.
diff --git a/src/_pytest/doctest.py b/src/_pytest/doctest.py
index ebf0d584c..440bc649c 100644
--- a/src/_pytest/doctest.py
+++ b/src/_pytest/doctest.py
@@ -1,4 +1,4 @@
-""" discover and run doctests in modules and test files."""
+"""Discover and run doctests in modules and test files."""
import bdb
import inspect
import platform
@@ -171,9 +171,10 @@ def _init_runner_class() -> "Type[doctest.DocTestRunner]":
import doctest
class PytestDoctestRunner(doctest.DebugRunner):
- """
- Runner to collect failures. Note that the out variable in this case is
- a list instead of a stdout-like object
+ """Runner to collect failures.
+
+ Note that the out variable in this case is a list instead of a
+ stdout-like object.
"""
def __init__(
@@ -261,9 +262,7 @@ class DoctestItem(pytest.Item):
dtest: "doctest.DocTest"
):
# incompatible signature due to imposed limits on subclass
- """
- the public named constructor
- """
+ """The public named constructor."""
return super().from_parent(name=name, parent=parent, runner=runner, dtest=dtest)
def setup(self) -> None:
@@ -289,9 +288,7 @@ class DoctestItem(pytest.Item):
raise MultipleDoctestFailures(failures)
def _disable_output_capturing_for_darwin(self) -> None:
- """
- Disable output capturing. Otherwise, stdout is lost to doctest (#985)
- """
+ """Disable output capturing. Otherwise, stdout is lost to doctest (#985)."""
if platform.system() != "Darwin":
return
capman = self.config.pluginmanager.getplugin("capturemanager")
@@ -403,7 +400,7 @@ def _get_continue_on_failure(config):
continue_on_failure = config.getvalue("doctest_continue_on_failure")
if continue_on_failure:
# We need to turn off this if we use pdb since we should stop at
- # the first failure
+ # the first failure.
if config.getvalue("usepdb"):
continue_on_failure = False
return continue_on_failure
@@ -415,8 +412,8 @@ class DoctestTextfile(pytest.Module):
def collect(self) -> Iterable[DoctestItem]:
import doctest
- # inspired by doctest.testfile; ideally we would use it directly,
- # but it doesn't support passing a custom checker
+ # Inspired by doctest.testfile; ideally we would use it directly,
+ # but it doesn't support passing a custom checker.
encoding = self.config.getini("doctest_encoding")
text = self.fspath.read_text(encoding)
filename = str(self.fspath)
@@ -441,9 +438,8 @@ class DoctestTextfile(pytest.Module):
def _check_all_skipped(test: "doctest.DocTest") -> None:
- """raises pytest.skip() if all examples in the given DocTest have the SKIP
- option set.
- """
+ """Raise pytest.skip() if all examples in the given DocTest have the SKIP
+ option set."""
import doctest
all_skipped = all(x.options.get(doctest.SKIP, False) for x in test.examples)
@@ -452,9 +448,8 @@ def _check_all_skipped(test: "doctest.DocTest") -> None:
def _is_mocked(obj: object) -> bool:
- """
- returns if a object is possibly a mock object by checking the existence of a highly improbable attribute
- """
+ """Return if an object is possibly a mock object by checking the
+ existence of a highly improbable attribute."""
return (
safe_getattr(obj, "pytest_mock_example_attribute_that_shouldnt_exist", None)
is not None
@@ -463,10 +458,8 @@ def _is_mocked(obj: object) -> bool:
@contextmanager
def _patch_unwrap_mock_aware() -> Generator[None, None, None]:
- """
- contextmanager which replaces ``inspect.unwrap`` with a version
- that's aware of mock objects and doesn't recurse on them
- """
+ """Context manager which replaces ``inspect.unwrap`` with a version
+ that's aware of mock objects and doesn't recurse into them."""
real_unwrap = inspect.unwrap
def _mock_aware_unwrap(
@@ -498,16 +491,15 @@ class DoctestModule(pytest.Module):
import doctest
class MockAwareDocTestFinder(doctest.DocTestFinder):
- """
- a hackish doctest finder that overrides stdlib internals to fix a stdlib bug
+ """A hackish doctest finder that overrides stdlib internals to fix a stdlib bug.
https://github.com/pytest-dev/pytest/issues/3456
https://bugs.python.org/issue25532
"""
def _find_lineno(self, obj, source_lines):
- """
- Doctest code does not take into account `@property`, this is a hackish way to fix it.
+ """Doctest code does not take into account `@property`, this
+ is a hackish way to fix it.
https://bugs.python.org/issue17446
"""
@@ -542,7 +534,7 @@ class DoctestModule(pytest.Module):
pytest.skip("unable to import module %r" % self.fspath)
else:
raise
- # uses internal doctest module parsing mechanism
+ # Uses internal doctest module parsing mechanism.
finder = MockAwareDocTestFinder()
optionflags = get_optionflags(self)
runner = _get_runner(
@@ -560,9 +552,7 @@ class DoctestModule(pytest.Module):
def _setup_fixtures(doctest_item: DoctestItem) -> FixtureRequest:
- """
- Used by DoctestTextfile and DoctestItem to setup fixture information.
- """
+ """Used by DoctestTextfile and DoctestItem to setup fixture information."""
def func() -> None:
pass
@@ -582,11 +572,9 @@ def _init_checker_class() -> "Type[doctest.OutputChecker]":
import re
class LiteralsOutputChecker(doctest.OutputChecker):
- """
- Based on doctest_nose_plugin.py from the nltk project
- (https://github.com/nltk/nltk) and on the "numtest" doctest extension
- by Sebastien Boisgerault (https://github.com/boisgera/numtest).
- """
+ # Based on doctest_nose_plugin.py from the nltk project
+ # (https://github.com/nltk/nltk) and on the "numtest" doctest extension
+ # by Sebastien Boisgerault (https://github.com/boisgera/numtest).
_unicode_literal_re = re.compile(r"(\W|^)[uU]([rR]?[\'\"])", re.UNICODE)
_bytes_literal_re = re.compile(r"(\W|^)[bB]([rR]?[\'\"])", re.UNICODE)
@@ -671,8 +659,7 @@ def _init_checker_class() -> "Type[doctest.OutputChecker]":
def _get_checker() -> "doctest.OutputChecker":
- """
- Returns a doctest.OutputChecker subclass that supports some
+ """Return a doctest.OutputChecker subclass that supports some
additional options:
* ALLOW_UNICODE and ALLOW_BYTES options to ignore u'' and b''
@@ -692,36 +679,31 @@ def _get_checker() -> "doctest.OutputChecker":
def _get_allow_unicode_flag() -> int:
- """
- Registers and returns the ALLOW_UNICODE flag.
- """
+ """Register and return the ALLOW_UNICODE flag."""
import doctest
return doctest.register_optionflag("ALLOW_UNICODE")
def _get_allow_bytes_flag() -> int:
- """
- Registers and returns the ALLOW_BYTES flag.
- """
+ """Register and return the ALLOW_BYTES flag."""
import doctest
return doctest.register_optionflag("ALLOW_BYTES")
def _get_number_flag() -> int:
- """
- Registers and returns the NUMBER flag.
- """
+ """Register and return the NUMBER flag."""
import doctest
return doctest.register_optionflag("NUMBER")
def _get_report_choice(key: str) -> int:
- """
- This function returns the actual `doctest` module flag value, we want to do it as late as possible to avoid
- importing `doctest` and all its dependencies when parsing options, as it adds overhead and breaks tests.
+ """Return the actual `doctest` module flag value.
+
+ We want to do it as late as possible to avoid importing `doctest` and all
+ its dependencies when parsing options, as it adds overhead and breaks tests.
"""
import doctest
@@ -736,7 +718,6 @@ def _get_report_choice(key: str) -> int:
@pytest.fixture(scope="session")
def doctest_namespace() -> Dict[str, Any]:
- """
- Fixture that returns a :py:class:`dict` that will be injected into the namespace of doctests.
- """
+ """Fixture that returns a :py:class:`dict` that will be injected into the
+ namespace of doctests."""
return dict()
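A short usage sketch for the doctest_namespace fixture documented above, placed in a conftest.py (the injected name is arbitrary):

    import pytest

    @pytest.fixture(autouse=True)
    def add_pi(doctest_namespace):
        # Every doctest collected from here on can refer to PI directly.
        doctest_namespace["PI"] = 3.14159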
diff --git a/src/_pytest/faulthandler.py b/src/_pytest/faulthandler.py
index 0d969840b..e4a952966 100644
--- a/src/_pytest/faulthandler.py
+++ b/src/_pytest/faulthandler.py
@@ -100,8 +100,7 @@ class FaultHandlerHooks:
@pytest.hookimpl(tryfirst=True)
def pytest_enter_pdb(self) -> None:
- """Cancel any traceback dumping due to timeout before entering pdb.
- """
+ """Cancel any traceback dumping due to timeout before entering pdb."""
import faulthandler
faulthandler.cancel_dump_traceback_later()
@@ -109,8 +108,7 @@ class FaultHandlerHooks:
@pytest.hookimpl(tryfirst=True)
def pytest_exception_interact(self) -> None:
"""Cancel any traceback dumping due to an interactive exception being
- raised.
- """
+ raised."""
import faulthandler
faulthandler.cancel_dump_traceback_later()
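For reference, these hooks revolve around two stdlib calls (a minimal sketch, not pytest's configuration logic):

    import faulthandler

    # Arm a delayed traceback dump, e.g. to diagnose a hanging test ...
    faulthandler.dump_traceback_later(300.0)
    # ... and cancel it again before handing control to an interactive
    # prompt, which is what the hooks above do.
    faulthandler.cancel_dump_traceback_later()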
diff --git a/src/_pytest/fixtures.py b/src/_pytest/fixtures.py
index d9f918745..5dbaf9e06 100644
--- a/src/_pytest/fixtures.py
+++ b/src/_pytest/fixtures.py
@@ -166,15 +166,16 @@ def get_scope_node(node, scope):
def add_funcarg_pseudo_fixture_def(
collector, metafunc: "Metafunc", fixturemanager: "FixtureManager"
) -> None:
- # this function will transform all collected calls to a functions
+ # This function will transform all collected calls to functions
# if they use direct funcargs (i.e. direct parametrization)
# because we want later test execution to be able to rely on
# an existing FixtureDef structure for all arguments.
# XXX we can probably avoid this algorithm if we modify CallSpec2
# to directly care for creating the fixturedefs within its methods.
if not metafunc._calls[0].funcargs:
- return # this function call does not have direct parametrization
- # collect funcargs of all callspecs into a list of values
+ # This function call does not have direct parametrization.
+ return
+ # Collect funcargs of all callspecs into a list of values.
arg2params = {} # type: Dict[str, List[object]]
arg2scope = {} # type: Dict[str, _Scope]
for callspec in metafunc._calls:
@@ -189,11 +190,11 @@ def add_funcarg_pseudo_fixture_def(
arg2scope[argname] = scopes[scopenum]
callspec.funcargs.clear()
- # register artificial FixtureDef's so that later at test execution
+ # Register artificial FixtureDef's so that later at test execution
# time we can rely on a proper FixtureDef to exist for fixture setup.
arg2fixturedefs = metafunc._arg2fixturedefs
for argname, valuelist in arg2params.items():
- # if we have a scope that is higher than function we need
+ # If we have a scope that is higher than function, we need
# to make sure we only ever create an according fixturedef on
# a per-scope basis. We thus store and cache the fixturedef on the
# node related to the scope.
@@ -203,7 +204,7 @@ def add_funcarg_pseudo_fixture_def(
node = get_scope_node(collector, scope)
if node is None:
assert scope == "class" and isinstance(collector, _pytest.python.Module)
- # use module-level collector for class-scope (for now)
+ # Use module-level collector for class-scope (for now).
node = collector
if node and argname in node._name2pseudofixturedef:
arg2fixturedefs[argname] = [node._name2pseudofixturedef[argname]]
@@ -224,7 +225,7 @@ def add_funcarg_pseudo_fixture_def(
def getfixturemarker(obj: object) -> Optional["FixtureFunctionMarker"]:
- """ return fixturemarker or None if it doesn't exist or raised
+ """Return fixturemarker or None if it doesn't exist or raised
exceptions."""
try:
fixturemarker = getattr(
@@ -242,7 +243,7 @@ _Key = Tuple[object, ...]
def get_parametrized_fixture_keys(item: "nodes.Item", scopenum: int) -> Iterator[_Key]:
- """ return list of keys for all parametrized arguments which match
+ """Return list of keys for all parametrized arguments which match
the specified scope."""
assert scopenum < scopenum_function # function
try:
@@ -269,10 +270,10 @@ def get_parametrized_fixture_keys(item: "nodes.Item", scopenum: int) -> Iterator
yield key
-# algorithm for sorting on a per-parametrized resource setup basis
-# it is called for scopenum==0 (session) first and performs sorting
+# Algorithm for sorting on a per-parametrized resource setup basis.
+# It is called for scopenum==0 (session) first and performs sorting
# down to the lower scopes such as to minimize number of "high scope"
-# setups and teardowns
+# setups and teardowns.
def reorder_items(items: "Sequence[nodes.Item]") -> "List[nodes.Item]":
@@ -339,7 +340,8 @@ def reorder_items_atscope(
no_argkey_group[item] = None
else:
slicing_argkey, _ = argkeys.popitem()
- # we don't have to remove relevant items from later in the deque because they'll just be ignored
+ # We don't have to remove relevant items from later in the
+ # deque because they'll just be ignored.
matching_items = [
i for i in scoped_items_by_argkey[slicing_argkey] if i in items
]
@@ -358,7 +360,7 @@ def reorder_items_atscope(
def fillfixtures(function: "Function") -> None:
- """ fill missing funcargs for a test function. """
+ """Fill missing funcargs for a test function."""
# Uncomment this after 6.0 release (#7361)
# warnings.warn(FILLFUNCARGS, stacklevel=2)
try:
@@ -373,7 +375,7 @@ def fillfixtures(function: "Function") -> None:
function._fixtureinfo = fi
request = function._request = FixtureRequest(function)
request._fillfixtures()
- # prune out funcargs for jstests
+ # Prune out funcargs for jstests.
newfuncargs = {}
for name in fi.argnames:
newfuncargs[name] = function.funcargs[name]
@@ -388,9 +390,9 @@ def get_direct_param_fixture_func(request):
@attr.s(slots=True)
class FuncFixtureInfo:
- # original function argument names
+ # Original function argument names.
argnames = attr.ib(type=Tuple[str, ...])
- # argnames that function immediately requires. These include argnames +
+ # Argnames that function immediately requires. These include argnames +
# fixture names specified via usefixtures and via autouse=True in fixture
# definitions.
initialnames = attr.ib(type=Tuple[str, ...])
@@ -398,7 +400,7 @@ class FuncFixtureInfo:
name2fixturedefs = attr.ib(type=Dict[str, Sequence["FixtureDef"]])
def prune_dependency_tree(self) -> None:
- """Recompute names_closure from initialnames and name2fixturedefs
+ """Recompute names_closure from initialnames and name2fixturedefs.
Can only reduce names_closure, which means that the new closure will
always be a subset of the old one. The order is preserved.
@@ -412,7 +414,7 @@ class FuncFixtureInfo:
working_set = set(self.initialnames)
while working_set:
argname = working_set.pop()
- # argname may be smth not included in the original names_closure,
+ # Argname may be something not included in the original names_closure,
# in which case we ignore it. This currently happens with pseudo
# FixtureDefs which wrap 'get_direct_param_fixture_func(request)'.
# So they introduce the new dependency 'request' which might have
@@ -426,18 +428,18 @@ class FuncFixtureInfo:
class FixtureRequest:
- """ A request for a fixture from a test or fixture function.
+ """A request for a fixture from a test or fixture function.
- A request object gives access to the requesting test context
- and has an optional ``param`` attribute in case
- the fixture is parametrized indirectly.
+ A request object gives access to the requesting test context and has
+ an optional ``param`` attribute in case the fixture is parametrized
+ indirectly.
"""
def __init__(self, pyfuncitem) -> None:
self._pyfuncitem = pyfuncitem
- #: fixture for which this request is being performed
+ #: Fixture for which this request is being performed.
self.fixturename = None # type: Optional[str]
- #: Scope string, one of "function", "class", "module", "session"
+ #: Scope string, one of "function", "class", "module", "session".
self.scope = "function" # type: _Scope
self._fixture_defs = {} # type: Dict[str, FixtureDef]
fixtureinfo = pyfuncitem._fixtureinfo # type: FuncFixtureInfo
@@ -449,35 +451,35 @@ class FixtureRequest:
@property
def fixturenames(self) -> List[str]:
- """names of all active fixtures in this request"""
+ """Names of all active fixtures in this request."""
result = list(self._pyfuncitem._fixtureinfo.names_closure)
result.extend(set(self._fixture_defs).difference(result))
return result
@property
def funcargnames(self) -> List[str]:
- """ alias attribute for ``fixturenames`` for pre-2.3 compatibility"""
+ """Alias attribute for ``fixturenames`` for pre-2.3 compatibility."""
warnings.warn(FUNCARGNAMES, stacklevel=2)
return self.fixturenames
@property
def node(self):
- """ underlying collection node (depends on current request scope)"""
+ """Underlying collection node (depends on current request scope)."""
return self._getscopeitem(self.scope)
def _getnextfixturedef(self, argname: str) -> "FixtureDef":
fixturedefs = self._arg2fixturedefs.get(argname, None)
if fixturedefs is None:
- # we arrive here because of a dynamic call to
+ # We arrive here because of a dynamic call to
# getfixturevalue(argname) usage which was naturally
- # not known at parsing/collection time
+ # not known at parsing/collection time.
assert self._pyfuncitem.parent is not None
parentid = self._pyfuncitem.parent.nodeid
fixturedefs = self._fixturemanager.getfixturedefs(argname, parentid)
# TODO: Fix this type ignore. Either add assert or adjust types.
# Can this be None here?
self._arg2fixturedefs[argname] = fixturedefs # type: ignore[assignment]
- # fixturedefs list is immutable so we maintain a decreasing index
+ # fixturedefs list is immutable so we maintain a decreasing index.
index = self._arg2index.get(argname, 0) - 1
if fixturedefs is None or (-index > len(fixturedefs)):
raise FixtureLookupError(argname, self)
@@ -486,25 +488,25 @@ class FixtureRequest:
@property
def config(self) -> Config:
- """ the pytest config object associated with this request. """
+ """The pytest config object associated with this request."""
return self._pyfuncitem.config # type: ignore[no-any-return] # noqa: F723
@scopeproperty()
def function(self):
- """ test function object if the request has a per-function scope. """
+ """Test function object if the request has a per-function scope."""
return self._pyfuncitem.obj
@scopeproperty("class")
def cls(self):
- """ class (can be None) where the test function was collected. """
+ """Class (can be None) where the test function was collected."""
clscol = self._pyfuncitem.getparent(_pytest.python.Class)
if clscol:
return clscol.obj
@property
def instance(self):
- """ instance (can be None) on which test function was collected. """
- # unittest support hack, see _pytest.unittest.TestCaseFunction
+ """Instance (can be None) on which test function was collected."""
+ # unittest support hack, see _pytest.unittest.TestCaseFunction.
try:
return self._pyfuncitem._testcase
except AttributeError:
@@ -513,30 +515,29 @@ class FixtureRequest:
@scopeproperty()
def module(self):
- """ python module object where the test function was collected. """
+ """Python module object where the test function was collected."""
return self._pyfuncitem.getparent(_pytest.python.Module).obj
@scopeproperty()
def fspath(self) -> py.path.local:
- """ the file system path of the test module which collected this test. """
+ """The file system path of the test module which collected this test."""
# TODO: Remove ignore once _pyfuncitem is properly typed.
return self._pyfuncitem.fspath # type: ignore
@property
def keywords(self):
- """ keywords/markers dictionary for the underlying node. """
+ """Keywords/markers dictionary for the underlying node."""
return self.node.keywords
@property
def session(self):
- """ pytest session object. """
+ """Pytest session object."""
return self._pyfuncitem.session
def addfinalizer(self, finalizer: Callable[[], object]) -> None:
- """ add finalizer/teardown function to be called after the
- last test within the requesting test context finished
- execution. """
- # XXX usually this method is shadowed by fixturedef specific ones
+ """Add finalizer/teardown function to be called after the last test
+ within the requesting test context finished execution."""
+ # XXX usually this method is shadowed by fixturedef specific ones.
self._addfinalizer(finalizer, scope=self.scope)
def _addfinalizer(self, finalizer: Callable[[], object], scope) -> None:
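Usage sketch for request.addfinalizer() as documented above (the resource is a throwaway temporary file; tmp_path is a built-in fixture):

    import pytest

    @pytest.fixture
    def log_file(tmp_path, request):
        handle = (tmp_path / "run.log").open("w")
        # Close the handle once the last test in this request's scope is done.
        request.addfinalizer(handle.close)
        return handle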
@@ -546,17 +547,19 @@ class FixtureRequest:
)
def applymarker(self, marker) -> None:
- """ Apply a marker to a single test function invocation.
+ """Apply a marker to a single test function invocation.
+
This method is useful if you don't want to have a keyword/marker
on all function invocations.
- :arg marker: a :py:class:`_pytest.mark.MarkDecorator` object
- created by a call to ``pytest.mark.NAME(...)``.
+ :param marker:
+ A :py:class:`_pytest.mark.MarkDecorator` object created by a call
+ to ``pytest.mark.NAME(...)``.
"""
self.node.add_marker(marker)
def raiseerror(self, msg: Optional[str]) -> "NoReturn":
- """ raise a FixtureLookupError with the given message. """
+ """Raise a FixtureLookupError with the given message."""
raise self._fixturemanager.FixtureLookupError(None, self, msg)
def _fillfixtures(self) -> None:
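And a sketch for applymarker() from the hunk above; the reason text is purely illustrative:

    import pytest

    @pytest.fixture
    def flaky_backend(request):
        # Mark only the tests that actually use this fixture as xfail.
        request.applymarker(pytest.mark.xfail(reason="backend is flaky"))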
@@ -567,14 +570,14 @@ class FixtureRequest:
item.funcargs[argname] = self.getfixturevalue(argname)
def getfixturevalue(self, argname: str) -> Any:
- """ Dynamically run a named fixture function.
+ """Dynamically run a named fixture function.
Declaring fixtures via function argument is recommended where possible.
But if you can only decide whether to use another fixture at test
setup time, you may use this function to retrieve it inside a fixture
or test function body.
- :raise pytest.FixtureLookupError:
+ :raises pytest.FixtureLookupError:
If the given fixture could not be found.
"""
fixturedef = self._get_active_fixturedef(argname)
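Finally, a small sketch of the getfixturevalue() pattern described above; the decision criterion is contrived, and tmp_path/tmp_path_factory are built-in fixtures:

    import pytest

    @pytest.fixture
    def tmp_target(request):
        # Which other fixture we need is only decidable at setup time.
        if request.config.getoption("verbose") > 0:
            return request.getfixturevalue("tmp_path_factory").mktemp("verbose")
        return request.getfixturevalue("tmp_path")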
@@ -595,8 +598,8 @@ class FixtureRequest:
scope = "function" # type: _Scope
return PseudoFixtureDef(cached_result, scope)
raise
- # remove indent to prevent the python3 exception
- # from leaking into the call
+ # Remove indent to prevent the python3 exception
+ # from leaking into the call.
self._compute_fixture_value(fixturedef)
self._fixture_defs[argname] = fixturedef
return fixturedef
@@ -614,10 +617,12 @@ class FixtureRequest:
current = current._parent_request
def _compute_fixture_value(self, fixturedef: "FixtureDef") -> None:
- """
- Creates a SubRequest based on "self" and calls the execute method of the given fixturedef object. This will
- force the FixtureDef object to throw away any previous results and compute a new fixture value, which
- will be stored into the FixtureDef object itself.
+ """Create a SubRequest based on "self" and call the execute method
+ of the given FixtureDef object.
+
+ This will force the FixtureDef object to throw away any previous
+ results and compute a new fixture value, which will be stored into
+ the FixtureDef object itself.
"""
# prepare a subrequest object before calling fixture function
# (latter managed by fixturedef)
@@ -667,18 +672,18 @@ class FixtureRequest:
fail(msg, pytrace=False)
else:
param_index = funcitem.callspec.indices[argname]
- # if a parametrize invocation set a scope it will override
- # the static scope defined with the fixture function
+ # If a parametrize invocation set a scope it will override
+ # the static scope defined with the fixture function.
paramscopenum = funcitem.callspec._arg2scopenum.get(argname)
if paramscopenum is not None:
scope = scopes[paramscopenum]
subrequest = SubRequest(self, scope, param, param_index, fixturedef)
- # check if a higher-level scoped fixture accesses a lower level one
+ # Check if a higher-level scoped fixture accesses a lower level one.
subrequest._check_scope(argname, self.scope, scope)
try:
- # call the fixture function
+ # Call the fixture function.
fixturedef.execute(request=subrequest)
finally:
self._schedule_finalizers(fixturedef, subrequest)
@@ -686,7 +691,7 @@ class FixtureRequest:
def _schedule_finalizers(
self, fixturedef: "FixtureDef", subrequest: "SubRequest"
) -> None:
- # if fixture function failed it might have registered finalizers
+ # If fixture function failed it might have registered finalizers.
self.session._setupstate.addfinalizer(
functools.partial(fixturedef.finish, request=subrequest), subrequest.node
)
@@ -695,7 +700,7 @@ class FixtureRequest:
if argname == "request":
return
if scopemismatch(invoking_scope, requested_scope):
- # try to report something helpful
+ # Try to report something helpful.
lines = self._factorytraceback()
fail(
"ScopeMismatch: You tried to access the %r scoped "
@@ -717,7 +722,7 @@ class FixtureRequest:
def _getscopeitem(self, scope):
if scope == "function":
- # this might also be a non-function Item despite its attribute name
+ # This might also be a non-function Item despite its attribute name.
return self._pyfuncitem
if scope == "package":
# FIXME: _fixturedef is not defined on FixtureRequest (this class),
@@ -726,7 +731,7 @@ class FixtureRequest:
else:
node = get_scope_node(self._pyfuncitem, scope)
if node is None and scope == "class":
- # fallback to function item itself
+ # Fallback to function item itself.
node = self._pyfuncitem
assert node, 'Could not obtain a node for scope "{}" for function {!r}'.format(
scope, self._pyfuncitem
@@ -738,8 +743,7 @@ class FixtureRequest:
class SubRequest(FixtureRequest):
- """ a sub request for handling getting a fixture from a
- test function/fixture. """
+ """A sub request for handling getting a fixture from a test function/fixture."""
def __init__(
self,
@@ -750,7 +754,7 @@ class SubRequest(FixtureRequest):
fixturedef: "FixtureDef",
) -> None:
self._parent_request = request
- self.fixturename = fixturedef.argname # type: str
+ self.fixturename = fixturedef.argname
if param is not NOTSET:
self.param = param
self.param_index = param_index
@@ -771,9 +775,9 @@ class SubRequest(FixtureRequest):
def _schedule_finalizers(
self, fixturedef: "FixtureDef", subrequest: "SubRequest"
) -> None:
- # if the executing fixturedef was not explicitly requested in the argument list (via
+ # If the executing fixturedef was not explicitly requested in the argument list (via
# getfixturevalue inside the fixture call) then ensure this fixture def will be finished
- # first
+ # first.
if fixturedef.argname not in self.fixturenames:
fixturedef.addfinalizer(
functools.partial(self._fixturedef.finish, request=self)
@@ -791,8 +795,7 @@ def scopemismatch(currentscope: "_Scope", newscope: "_Scope") -> bool:
def scope2index(scope: str, descr: str, where: Optional[str] = None) -> int:
"""Look up the index of ``scope`` and raise a descriptive value error
- if not defined.
- """
+ if not defined."""
strscopes = scopes # type: Sequence[str]
try:
return strscopes.index(scope)
@@ -806,7 +809,7 @@ def scope2index(scope: str, descr: str, where: Optional[str] = None) -> int:
class FixtureLookupError(LookupError):
- """ could not return a requested Fixture (missing or invalid). """
+ """Could not return a requested fixture (missing or invalid)."""
def __init__(
self, argname: Optional[str], request: FixtureRequest, msg: Optional[str] = None
@@ -823,8 +826,8 @@ class FixtureLookupError(LookupError):
stack.extend(map(lambda x: x.func, self.fixturestack))
msg = self.msg
if msg is not None:
- # the last fixture raise an error, let's present
- # it at the requesting side
+ # The last fixture raised an error, let's present
+ # it at the requesting side.
stack = stack[:-1]
for function in stack:
fspath, lineno = getfslineno(function)
@@ -925,8 +928,9 @@ def call_fixture_func(
def _teardown_yield_fixture(fixturefunc, it) -> None:
- """Executes the teardown of a fixture function by advancing the iterator after the
- yield and ensure the iteration ends (if not it means there is more than one yield in the function)"""
+ """Execute the teardown of a fixture function by advancing the iterator
+ after the yield, and ensure the iteration ends (if it does not, it means
+ there is more than one yield in the function)."""
try:
next(it)
except StopIteration:
@@ -961,7 +965,7 @@ def _eval_scope_callable(
class FixtureDef(Generic[_FixtureValue]):
- """ A container for a factory definition. """
+ """A container for a factory definition."""
def __init__(
self,
@@ -1023,16 +1027,15 @@ class FixtureDef(Generic[_FixtureValue]):
finally:
hook = self._fixturemanager.session.gethookproxy(request.node.fspath)
hook.pytest_fixture_post_finalizer(fixturedef=self, request=request)
- # even if finalization fails, we invalidate
- # the cached fixture value and remove
- # all finalizers because they may be bound methods which will
- # keep instances alive
+ # Even if finalization fails, we invalidate the cached fixture
+ # value and remove all finalizers because they may be bound methods
+ # which will keep instances alive.
self.cached_result = None
self._finalizers = []
def execute(self, request: SubRequest) -> _FixtureValue:
- # get required arguments and register our own finish()
- # with their finalization
+ # Get required arguments and register our own finish()
+ # with their finalization.
for argname in self.argnames:
fixturedef = request._get_active_fixturedef(argname)
if argname != "request":
@@ -1043,7 +1046,7 @@ class FixtureDef(Generic[_FixtureValue]):
my_cache_key = self.cache_key(request)
if self.cached_result is not None:
# note: comparison with `==` can fail (or be expensive) for e.g.
- # numpy arrays (#6497)
+ # numpy arrays (#6497).
cache_key = self.cached_result[1]
if my_cache_key is cache_key:
if self.cached_result[2] is not None:
@@ -1052,8 +1055,8 @@ class FixtureDef(Generic[_FixtureValue]):
else:
result = self.cached_result[0]
return result
- # we have a previous but differently parametrized fixture instance
- # so we need to tear it down before creating a new one
+ # We have a previous but differently parametrized fixture instance
+ # so we need to tear it down before creating a new one.
self.finish(request)
assert self.cached_result is None
@@ -1073,21 +1076,20 @@ class FixtureDef(Generic[_FixtureValue]):
def resolve_fixture_function(
fixturedef: FixtureDef[_FixtureValue], request: FixtureRequest
) -> "_FixtureFunc[_FixtureValue]":
- """Gets the actual callable that can be called to obtain the fixture value, dealing with unittest-specific
- instances and bound methods.
- """
+ """Get the actual callable that can be called to obtain the fixture
+ value, dealing with unittest-specific instances and bound methods."""
fixturefunc = fixturedef.func
if fixturedef.unittest:
if request.instance is not None:
- # bind the unbound method to the TestCase instance
+ # Bind the unbound method to the TestCase instance.
fixturefunc = fixturedef.func.__get__(request.instance) # type: ignore[union-attr]
else:
- # the fixture function needs to be bound to the actual
+ # The fixture function needs to be bound to the actual
# request.instance so that code working with "fixturedef" behaves
# as expected.
if request.instance is not None:
- # handle the case where fixture is defined not in a test class, but some other class
- # (for example a plugin class with a fixture), see #2270
+ # Handle the case where fixture is defined not in a test class, but some other class
+ # (for example a plugin class with a fixture), see #2270.
if hasattr(fixturefunc, "__self__") and not isinstance(
request.instance, fixturefunc.__self__.__class__ # type: ignore[union-attr]
):
@@ -1101,7 +1103,7 @@ def resolve_fixture_function(
def pytest_fixture_setup(
fixturedef: FixtureDef[_FixtureValue], request: SubRequest
) -> _FixtureValue:
- """ Execution of fixture setup. """
+ """Execution of fixture setup."""
kwargs = {}
for argname in fixturedef.argnames:
fixdef = request._get_active_fixturedef(argname)
@@ -1151,8 +1153,7 @@ def _params_converter(
def wrap_function_to_error_out_if_called_directly(function, fixture_marker):
"""Wrap the given fixture function so we can raise an error about it being called directly,
- instead of used as an argument in a test function.
- """
+ instead of used as an argument in a test function."""
message = (
'Fixture "{name}" called directly. Fixtures are not meant to be called directly,\n'
"but are created automatically when test functions request them as parameters.\n"
@@ -1164,8 +1165,8 @@ def wrap_function_to_error_out_if_called_directly(function, fixture_marker):
def result(*args, **kwargs):
fail(message, pytrace=False)
- # keep reference to the original function in our own custom attribute so we don't unwrap
- # further than this point and lose useful wrappings like @mock.patch (#3774)
+ # Keep reference to the original function in our own custom attribute so we don't unwrap
+ # further than this point and lose useful wrappings like @mock.patch (#3774).
result.__pytest_wrapped__ = _PytestWrapper(function) # type: ignore[attr-defined]
return result
@@ -1268,47 +1269,49 @@ def fixture( # noqa: F811
fixture function.
The name of the fixture function can later be referenced to cause its
- invocation ahead of running tests: test
- modules or classes can use the ``pytest.mark.usefixtures(fixturename)``
- marker.
-
- Test functions can directly use fixture names as input
- arguments in which case the fixture instance returned from the fixture
- function will be injected.
-
- Fixtures can provide their values to test functions using ``return`` or ``yield``
- statements. When using ``yield`` the code block after the ``yield`` statement is executed
- as teardown code regardless of the test outcome, and must yield exactly once.
-
- :arg scope: the scope for which this fixture is shared, one of
- ``"function"`` (default), ``"class"``, ``"module"``,
- ``"package"`` or ``"session"``.
-
- This parameter may also be a callable which receives ``(fixture_name, config)``
- as parameters, and must return a ``str`` with one of the values mentioned above.
-
- See :ref:`dynamic scope` in the docs for more information.
-
- :arg params: an optional list of parameters which will cause multiple
- invocations of the fixture function and all of the tests
- using it.
- The current parameter is available in ``request.param``.
-
- :arg autouse: if True, the fixture func is activated for all tests that
- can see it. If False (the default) then an explicit
- reference is needed to activate the fixture.
-
- :arg ids: list of string ids each corresponding to the params
- so that they are part of the test id. If no ids are provided
- they will be generated automatically from the params.
-
- :arg name: the name of the fixture. This defaults to the name of the
- decorated function. If a fixture is used in the same module in
- which it is defined, the function name of the fixture will be
- shadowed by the function arg that requests the fixture; one way
- to resolve this is to name the decorated function
- ``fixture_<fixturename>`` and then use
- ``@pytest.fixture(name='<fixturename>')``.
+ invocation ahead of running tests: test modules or classes can use the
+ ``pytest.mark.usefixtures(fixturename)`` marker.
+
+ Test functions can directly use fixture names as input arguments in which
+ case the fixture instance returned from the fixture function will be
+ injected.
+
+ Fixtures can provide their values to test functions using ``return`` or
+ ``yield`` statements. When using ``yield`` the code block after the
+ ``yield`` statement is executed as teardown code regardless of the test
+ outcome, and must yield exactly once.
+
+ :param scope:
+ The scope for which this fixture is shared; one of ``"function"``
+ (default), ``"class"``, ``"module"``, ``"package"`` or ``"session"``.
+
+ This parameter may also be a callable which receives ``(fixture_name, config)``
+ as parameters, and must return a ``str`` with one of the values mentioned above.
+
+ See :ref:`dynamic scope` in the docs for more information.
+
+ :param params:
+ An optional list of parameters which will cause multiple invocations
+ of the fixture function and all of the tests using it. The current
+ parameter is available in ``request.param``.
+
+ :param autouse:
+ If True, the fixture func is activated for all tests that can see it.
+ If False (the default), an explicit reference is needed to activate
+ the fixture.
+
+ :param ids:
+ List of string ids each corresponding to the params so that they are
+ part of the test id. If no ids are provided they will be generated
+ automatically from the params.
+
+ :param name:
+ The name of the fixture. This defaults to the name of the decorated
+ function. If a fixture is used in the same module in which it is
+ defined, the function name of the fixture will be shadowed by the
+ function arg that requests the fixture; one way to resolve this is to
+ name the decorated function ``fixture_<fixturename>`` and then use
+ ``@pytest.fixture(name='<fixturename>')``.
"""
# Positional arguments backward compatibility.
# If a kwarg is equal to its default, assume it was not explicitly
@@ -1377,7 +1380,7 @@ def yield_fixture(
ids=None,
name=None
):
- """ (return a) decorator to mark a yield-fixture factory function.
+ """(Return a) decorator to mark a yield-fixture factory function.
.. deprecated:: 3.0
Use :py:func:`pytest.fixture` directly instead.
@@ -1417,8 +1420,7 @@ def pytest_addoption(parser: Parser) -> None:
class FixtureManager:
- """
- pytest fixtures definitions and information is stored and managed
+ """pytest fixture definitions and information is stored and managed
from this class.
During collection fm.parsefactories() is called multiple times to parse
@@ -1431,7 +1433,7 @@ class FixtureManager:
which themselves offer a fixturenames attribute.
The FuncFixtureInfo object holds information about fixtures and FixtureDefs
- relevant for a particular function. An initial list of fixtures is
+ relevant for a particular function. An initial list of fixtures is
assembled like this:
- ini-defined usefixtures
@@ -1441,7 +1443,7 @@ class FixtureManager:
Subsequently the funcfixtureinfo.fixturenames attribute is computed
as the closure of the fixtures needed to setup the initial fixtures,
- i. e. fixtures needed by fixture functions themselves are appended
+ i.e. fixtures needed by fixture functions themselves are appended
to the fixturenames list.
Upon the test-setup phases all fixturenames are instantiated, retrieved
@@ -1462,13 +1464,13 @@ class FixtureManager:
session.config.pluginmanager.register(self, "funcmanage")
def _get_direct_parametrize_args(self, node: "nodes.Node") -> List[str]:
- """This function returns all the direct parametrization
- arguments of a node, so we don't mistake them for fixtures
+ """Return all direct parametrization arguments of a node, so we don't
+ mistake them for fixtures.
- Check https://github.com/pytest-dev/pytest/issues/5036
+ Check https://github.com/pytest-dev/pytest/issues/5036.
- This things are done later as well when dealing with parametrization
- so this could be improved
+ These things are done later as well when dealing with parametrization,
+ so this could be improved.
"""
parametrize_argnames = [] # type: List[str]
for marker in node.iter_markers(name="parametrize"):
@@ -1507,9 +1509,9 @@ class FixtureManager:
else:
from _pytest import nodes
- # construct the base nodeid which is later used to check
+ # Construct the base nodeid which is later used to check
# what fixtures are visible for particular tests (as denoted
- # by their test id)
+ # by their test id).
if p.basename.startswith("conftest.py"):
nodeid = p.dirpath().relto(self.config.rootdir)
if p.sep != nodes.SEP:
@@ -1518,7 +1520,7 @@ class FixtureManager:
self.parsefactories(plugin, nodeid)
def _getautousenames(self, nodeid: str) -> List[str]:
- """ return a tuple of fixture names to be used. """
+ """Return a list of fixture names to be used."""
autousenames = [] # type: List[str]
for baseid, basenames in self._nodeid_and_autousenames:
if nodeid.startswith(baseid):
@@ -1533,12 +1535,12 @@ class FixtureManager:
def getfixtureclosure(
self, fixturenames: Tuple[str, ...], parentnode, ignore_args: Sequence[str] = ()
) -> Tuple[Tuple[str, ...], List[str], Dict[str, Sequence[FixtureDef]]]:
- # collect the closure of all fixtures , starting with the given
+ # Collect the closure of all fixtures, starting with the given
# fixturenames as the initial set. As we have to visit all
# factory definitions anyway, we also return an arg2fixturedefs
# mapping so that the caller can reuse it and does not have
# to re-discover fixturedefs again for each fixturename
- # (discovering matching fixtures for a given name/node is expensive)
+ # (discovering matching fixtures for a given name/node is expensive).
parentid = parentnode.nodeid
fixturenames_closure = self._getautousenames(parentid)
@@ -1550,7 +1552,7 @@ class FixtureManager:
merge(fixturenames)
- # at this point, fixturenames_closure contains what we call "initialnames",
+ # At this point, fixturenames_closure contains what we call "initialnames",
# which is a set of fixturenames the function immediately requests. We
# need to return it as well, so save this.
initialnames = tuple(fixturenames_closure)
@@ -1608,10 +1610,10 @@ class FixtureManager:
ids=fixturedef.ids,
)
else:
- continue # will raise FixtureLookupError at setup time
+ continue # Will raise FixtureLookupError at setup time.
def pytest_collection_modifyitems(self, items: "List[nodes.Item]") -> None:
- # separate parametrized setups
+ # Separate parametrized setups.
items[:] = reorder_items(items)
def parsefactories(
@@ -1633,16 +1635,17 @@ class FixtureManager:
obj = safe_getattr(holderobj, name, None)
marker = getfixturemarker(obj)
if not isinstance(marker, FixtureFunctionMarker):
- # magic globals with __getattr__ might have got us a wrong
- # fixture attribute
+ # Magic globals with __getattr__ might have got us a wrong
+ # fixture attribute.
continue
if marker.name:
name = marker.name
- # during fixture definition we wrap the original fixture function
- # to issue a warning if called directly, so here we unwrap it in order to not emit the warning
- # when pytest itself calls the fixture function
+ # During fixture definition we wrap the original fixture function
+ # to issue a warning if called directly, so here we unwrap it in
+ # order to not emit the warning when pytest itself calls the
+ # fixture function.
obj = get_real_method(obj, holderobj)
fixture_def = FixtureDef(
@@ -1675,12 +1678,11 @@ class FixtureManager:
def getfixturedefs(
self, argname: str, nodeid: str
) -> Optional[Sequence[FixtureDef]]:
- """
- Gets a list of fixtures which are applicable to the given node id.
+ """Get a list of fixtures which are applicable to the given node id.
- :param str argname: name of the fixture to search for
- :param str nodeid: full node id of the requesting test.
- :return: list[FixtureDef]
+ :param str argname: Name of the fixture to search for.
+ :param str nodeid: Full node id of the requesting test.
+ :rtype: Sequence[FixtureDef]
"""
try:
fixturedefs = self._arg2fixturedefs[argname]
diff --git a/src/_pytest/freeze_support.py b/src/_pytest/freeze_support.py
index 63c14eceb..8b93ed5f7 100644
--- a/src/_pytest/freeze_support.py
+++ b/src/_pytest/freeze_support.py
@@ -1,7 +1,5 @@
-"""
-Provides a function to report all internal modules for using freezing tools
-pytest
-"""
+"""Provides a function to report all internal modules for using freezing
+tools."""
import types
from typing import Iterator
from typing import List
@@ -9,10 +7,8 @@ from typing import Union
def freeze_includes() -> List[str]:
- """
- Returns a list of module names used by pytest that should be
- included by cx_freeze.
- """
+ """Return a list of module names used by pytest that should be
+ included by cx_freeze."""
import py
import _pytest
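A minimal sketch of feeding ``freeze_includes()`` to cx_Freeze (the project name and the ``runtests.py`` entry point are assumptions):

    # setup.py (sketch) for freezing an application that runs pytest.
    from cx_Freeze import Executable, setup

    import pytest

    setup(
        name="runtests",
        version="0.1",
        options={"build_exe": {"includes": pytest.freeze_includes()}},
        executables=[Executable("runtests.py")],
    )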
@@ -24,8 +20,7 @@ def freeze_includes() -> List[str]:
def _iter_all_modules(
package: Union[str, types.ModuleType], prefix: str = "",
) -> Iterator[str]:
- """
- Iterates over the names of all modules that can be found in the given
+ """Iterate over the names of all modules that can be found in the given
package, recursively.
>>> import _pytest
diff --git a/src/_pytest/helpconfig.py b/src/_pytest/helpconfig.py
index f3623b8a1..348a65ede 100644
--- a/src/_pytest/helpconfig.py
+++ b/src/_pytest/helpconfig.py
@@ -1,4 +1,4 @@
-""" version info, help messages, tracing configuration. """
+"""Version info, help messages, tracing configuration."""
import os
import sys
from argparse import Action
@@ -16,8 +16,9 @@ from _pytest.config.argparsing import Parser
class HelpAction(Action):
- """This is an argparse Action that will raise an exception in
- order to skip the rest of the argument parsing when --help is passed.
+ """An argparse Action that will raise an exception in order to skip the
+ rest of the argument parsing when --help is passed.
+
This prevents argparse from quitting due to missing required arguments
when any are defined, for example by ``pytest_addoption``.
This is similar to the way that the builtin argparse --help option is
@@ -37,7 +38,7 @@ class HelpAction(Action):
def __call__(self, parser, namespace, values, option_string=None):
setattr(namespace, self.dest, self.const)
- # We should only skip the rest of the parsing after preparse is done
+ # We should only skip the rest of the parsing after preparse is done.
if getattr(parser._parser, "after_preparse", False):
raise PrintHelp
diff --git a/src/_pytest/hookspec.py b/src/_pytest/hookspec.py
index d21c4d4d9..60b1b643a 100644
--- a/src/_pytest/hookspec.py
+++ b/src/_pytest/hookspec.py
@@ -1,4 +1,5 @@
-""" hook specifications for pytest plugins, invoked from main.py and builtin plugins. """
+"""Hook specifications for pytest plugins which are invoked by pytest itself
+and by builtin plugins."""
from typing import Any
from typing import Dict
from typing import List
@@ -51,11 +52,10 @@ hookspec = HookspecMarker("pytest")
@hookspec(historic=True)
def pytest_addhooks(pluginmanager: "PytestPluginManager") -> None:
- """called at plugin registration time to allow adding new hooks via a call to
+ """Called at plugin registration time to allow adding new hooks via a call to
``pluginmanager.add_hookspecs(module_or_class, prefix)``.
-
- :param _pytest.config.PytestPluginManager pluginmanager: pytest plugin manager
+ :param _pytest.config.PytestPluginManager pluginmanager: pytest plugin manager.
.. note::
This hook is incompatible with ``hookwrapper=True``.
@@ -66,10 +66,10 @@ def pytest_addhooks(pluginmanager: "PytestPluginManager") -> None:
def pytest_plugin_registered(
plugin: "_PluggyPlugin", manager: "PytestPluginManager"
) -> None:
- """ a new pytest plugin got registered.
+ """A new pytest plugin got registered.
- :param plugin: the plugin module or instance
- :param _pytest.config.PytestPluginManager manager: pytest plugin manager
+ :param plugin: The plugin module or instance.
+ :param _pytest.config.PytestPluginManager manager: pytest plugin manager.
.. note::
This hook is incompatible with ``hookwrapper=True``.
@@ -78,7 +78,7 @@ def pytest_plugin_registered(
@hookspec(historic=True)
def pytest_addoption(parser: "Parser", pluginmanager: "PytestPluginManager") -> None:
- """register argparse-style options and ini-style config values,
+ """Register argparse-style options and ini-style config values,
called once at the beginning of a test run.
.. note::
@@ -87,15 +87,16 @@ def pytest_addoption(parser: "Parser", pluginmanager: "PytestPluginManager") ->
files situated at the tests root directory due to how pytest
:ref:`discovers plugins during startup <pluginorder>`.
- :arg _pytest.config.argparsing.Parser parser: To add command line options, call
+ :param _pytest.config.argparsing.Parser parser:
+ To add command line options, call
:py:func:`parser.addoption(...) <_pytest.config.argparsing.Parser.addoption>`.
To add ini-file values call :py:func:`parser.addini(...)
<_pytest.config.argparsing.Parser.addini>`.
- :arg _pytest.config.PytestPluginManager pluginmanager: pytest plugin manager,
- which can be used to install :py:func:`hookspec`'s or :py:func:`hookimpl`'s
- and allow one plugin to call another plugin's hooks to change how
- command line options are added.
+ :param _pytest.config.PytestPluginManager pluginmanager:
+ pytest plugin manager, which can be used to install :py:func:`hookspec`'s
+ or :py:func:`hookimpl`'s and allow one plugin to call another plugin's hooks
+ to change how command line options are added.
Options can later be accessed through the
:py:class:`config <_pytest.config.Config>` object, respectively:
@@ -116,8 +117,7 @@ def pytest_addoption(parser: "Parser", pluginmanager: "PytestPluginManager") ->
@hookspec(historic=True)
def pytest_configure(config: "Config") -> None:
- """
- Allows plugins and conftest files to perform initial configuration.
+ """Allow plugins and conftest files to perform initial configuration.
This hook is called for every plugin and initial conftest file
after command line options have been parsed.
@@ -128,7 +128,7 @@ def pytest_configure(config: "Config") -> None:
.. note::
This hook is incompatible with ``hookwrapper=True``.
- :arg _pytest.config.Config config: pytest config object
+ :param _pytest.config.Config config: The pytest config object.
"""
@@ -142,16 +142,17 @@ def pytest_configure(config: "Config") -> None:
def pytest_cmdline_parse(
pluginmanager: "PytestPluginManager", args: List[str]
) -> Optional["Config"]:
- """return initialized config object, parsing the specified args.
+ """Return an initialized config object, parsing the specified args.
- Stops at first non-None result, see :ref:`firstresult`
+ Stops at first non-None result, see :ref:`firstresult`.
.. note::
- This hook will only be called for plugin classes passed to the ``plugins`` arg when using `pytest.main`_ to
- perform an in-process test run.
+ This hook will only be called for plugin classes passed to the
+ ``plugins`` arg when using `pytest.main`_ to perform an in-process
+ test run.
- :param _pytest.config.PytestPluginManager pluginmanager: pytest plugin manager
- :param list[str] args: list of arguments passed on the command line
+ :param _pytest.config.PytestPluginManager pluginmanager: Pytest plugin manager.
+ :param List[str] args: List of arguments passed on the command line.
"""
@@ -164,37 +165,37 @@ def pytest_cmdline_preparse(config: "Config", args: List[str]) -> None:
.. note::
This hook will not be called for ``conftest.py`` files, only for setuptools plugins.
- :param _pytest.config.Config config: pytest config object
- :param list[str] args: list of arguments passed on the command line
+ :param _pytest.config.Config config: The pytest config object.
+ :param List[str] args: Arguments passed on the command line.
"""
@hookspec(firstresult=True)
def pytest_cmdline_main(config: "Config") -> Optional[Union["ExitCode", int]]:
- """ called for performing the main command line action. The default
+ """Called for performing the main command line action. The default
implementation will invoke the configure hooks and runtest_mainloop.
.. note::
This hook will not be called for ``conftest.py`` files, only for setuptools plugins.
- Stops at first non-None result, see :ref:`firstresult`
+ Stops at first non-None result, see :ref:`firstresult`.
- :param _pytest.config.Config config: pytest config object
+ :param _pytest.config.Config config: The pytest config object.
"""
def pytest_load_initial_conftests(
early_config: "Config", parser: "Parser", args: List[str]
) -> None:
- """ implements the loading of initial conftest files ahead
+ """Called to implement the loading of initial conftest files ahead
of command line option parsing.
.. note::
This hook will not be called for ``conftest.py`` files, only for setuptools plugins.
- :param _pytest.config.Config early_config: pytest config object
- :param list[str] args: list of arguments passed on the command line
- :param _pytest.config.argparsing.Parser parser: to add command line options
+ :param _pytest.config.Config early_config: The pytest config object.
+ :param List[str] args: Arguments passed on the command line.
+ :param _pytest.config.argparsing.Parser parser: To add command line options.
"""
@@ -224,26 +225,26 @@ def pytest_collection(session: "Session") -> Optional[object]:
for example the terminal plugin uses it to start displaying the collection
counter (and returns `None`).
- :param _pytest.main.Session session: the pytest session object
+ :param _pytest.main.Session session: The pytest session object.
"""
def pytest_collection_modifyitems(
session: "Session", config: "Config", items: List["Item"]
) -> None:
- """ called after collection has been performed, may filter or re-order
+ """Called after collection has been performed. May filter or re-order
the items in-place.
- :param _pytest.main.Session session: the pytest session object
- :param _pytest.config.Config config: pytest config object
- :param List[_pytest.nodes.Item] items: list of item objects
+ :param _pytest.main.Session session: The pytest session object.
+ :param _pytest.config.Config config: The pytest config object.
+ :param List[_pytest.nodes.Item] items: List of item objects.
"""
def pytest_collection_finish(session: "Session") -> None:
"""Called after collection has been performed and modified.
- :param _pytest.main.Session session: the pytest session object
+ :param _pytest.main.Session session: The pytest session object.
"""
@@ -256,8 +257,8 @@ def pytest_ignore_collect(path: py.path.local, config: "Config") -> Optional[boo
Stops at first non-None result, see :ref:`firstresult`.
- :param path: a :py:class:`py.path.local` - the path to analyze
- :param _pytest.config.Config config: pytest config object
+ :param py.path.local path: The path to analyze.
+ :param _pytest.config.Config config: The pytest config object.
"""
@@ -267,7 +268,7 @@ def pytest_collect_directory(path: py.path.local, parent) -> Optional[object]:
Stops at first non-None result, see :ref:`firstresult`.
- :param path: a :py:class:`py.path.local` - the path to analyze
+ :param py.path.local path: The path to analyze.
"""
@@ -276,7 +277,7 @@ def pytest_collect_file(path: py.path.local, parent) -> "Optional[Collector]":
Any new node needs to have the specified ``parent`` as a parent.
- :param path: a :py:class:`py.path.local` - the path to collect
+ :param py.path.local path: The path to collect.
"""
@@ -284,7 +285,7 @@ def pytest_collect_file(path: py.path.local, parent) -> "Optional[Collector]":
def pytest_collectstart(collector: "Collector") -> None:
- """ collector starts collecting. """
+ """Collector starts collecting."""
def pytest_itemcollected(item: "Item") -> None:
@@ -292,7 +293,7 @@ def pytest_itemcollected(item: "Item") -> None:
def pytest_collectreport(report: "CollectReport") -> None:
- """ collector finished collecting. """
+ """Collector finished collecting."""
def pytest_deselected(items: Sequence["Item"]) -> None:
@@ -301,9 +302,10 @@ def pytest_deselected(items: Sequence["Item"]) -> None:
@hookspec(firstresult=True)
def pytest_make_collect_report(collector: "Collector") -> "Optional[CollectReport]":
- """ perform ``collector.collect()`` and return a CollectReport.
+ """Perform ``collector.collect()`` and return a CollectReport.
- Stops at first non-None result, see :ref:`firstresult` """
+ Stops at first non-None result, see :ref:`firstresult`.
+ """
# -------------------------------------------------------------------------
@@ -321,7 +323,7 @@ def pytest_pycollect_makemodule(path: py.path.local, parent) -> Optional["Module
Stops at first non-None result, see :ref:`firstresult`.
- :param path: a :py:class:`py.path.local` - the path of module to collect
+ :param py.path.local path: The path of module to collect.
"""
@@ -337,28 +339,31 @@ def pytest_pycollect_makeitem(
@hookspec(firstresult=True)
def pytest_pyfunc_call(pyfuncitem: "Function") -> Optional[object]:
- """ call underlying test function.
+ """Call underlying test function.
- Stops at first non-None result, see :ref:`firstresult` """
+ Stops at first non-None result, see :ref:`firstresult`.
+ """
def pytest_generate_tests(metafunc: "Metafunc") -> None:
- """ generate (multiple) parametrized calls to a test function."""
+ """Generate (multiple) parametrized calls to a test function."""
@hookspec(firstresult=True)
def pytest_make_parametrize_id(
config: "Config", val: object, argname: str
) -> Optional[str]:
- """Return a user-friendly string representation of the given ``val`` that will be used
- by @pytest.mark.parametrize calls. Return None if the hook doesn't know about ``val``.
+ """Return a user-friendly string representation of the given ``val``
+ that will be used by @pytest.mark.parametrize calls, or None if the hook
+ doesn't know about ``val``.
+
The parameter name is available as ``argname``, if required.
- Stops at first non-None result, see :ref:`firstresult`
+ Stops at first non-None result, see :ref:`firstresult`.
- :param _pytest.config.Config config: pytest config object
- :param val: the parametrized value
- :param str argname: the automatic parameter name produced by pytest
+ :param _pytest.config.Config config: The pytest config object.
+ :param val: The parametrized value.
+ :param str argname: The automatic parameter name produced by pytest.
"""
@@ -369,7 +374,7 @@ def pytest_make_parametrize_id(
@hookspec(firstresult=True)
def pytest_runtestloop(session: "Session") -> Optional[object]:
- """Performs the main runtest loop (after collection finished).
+ """Perform the main runtest loop (after collection finished).
The default hook implementation performs the runtest protocol for all items
collected in the session (``session.items``), unless the collection failed
@@ -392,7 +397,7 @@ def pytest_runtestloop(session: "Session") -> Optional[object]:
def pytest_runtest_protocol(
item: "Item", nextitem: "Optional[Item]"
) -> Optional[object]:
- """Performs the runtest protocol for a single test item.
+ """Perform the runtest protocol for a single test item.
The default runtest protocol is this (see individual hooks for full details):
@@ -418,9 +423,8 @@ def pytest_runtest_protocol(
- ``pytest_runtest_logfinish(nodeid, location)``
- :arg item: Test item for which the runtest protocol is performed.
-
- :arg nextitem: The scheduled-to-be-next test item (or None if this is the end my friend).
+ :param item: Test item for which the runtest protocol is performed.
+ :param nextitem: The scheduled-to-be-next test item (or None if this is the end my friend).
Stops at first non-None result, see :ref:`firstresult`.
The return value is not used, but only stops further processing.
@@ -476,10 +480,11 @@ def pytest_runtest_teardown(item: "Item", nextitem: Optional["Item"]) -> None:
includes running the teardown phase of fixtures required by the item (if
they go out of scope).
- :arg nextitem: The scheduled-to-be-next test item (None if no further
- test item is scheduled). This argument can be used to
- perform exact teardowns, i.e. calling just enough finalizers
- so that nextitem only needs to call setup-functions.
+ :param nextitem:
+ The scheduled-to-be-next test item (None if no further test item is
+ scheduled). This argument can be used to perform exact teardowns,
+ i.e. calling just enough finalizers so that nextitem only needs to
+ call setup-functions.
"""
@@ -510,19 +515,15 @@ def pytest_runtest_logreport(report: "TestReport") -> None:
def pytest_report_to_serializable(
config: "Config", report: Union["CollectReport", "TestReport"],
) -> Optional[Dict[str, Any]]:
- """
- Serializes the given report object into a data structure suitable for sending
- over the wire, e.g. converted to JSON.
- """
+ """Serialize the given report object into a data structure suitable for
+ sending over the wire, e.g. converted to JSON."""
@hookspec(firstresult=True)
def pytest_report_from_serializable(
config: "Config", data: Dict[str, Any],
) -> Optional[Union["CollectReport", "TestReport"]]:
- """
- Restores a report object previously serialized with pytest_report_to_serializable().
- """
+ """Restore a report object previously serialized with pytest_report_to_serializable()."""
# -------------------------------------------------------------------------
@@ -534,9 +535,9 @@ def pytest_report_from_serializable(
def pytest_fixture_setup(
fixturedef: "FixtureDef", request: "SubRequest"
) -> Optional[object]:
- """Performs fixture setup execution.
+ """Perform fixture setup execution.
- :return: The return value of the call to the fixture function.
+ :returns: The return value of the call to the fixture function.
Stops at first non-None result, see :ref:`firstresult`.
@@ -564,7 +565,7 @@ def pytest_sessionstart(session: "Session") -> None:
"""Called after the ``Session`` object has been created and before performing collection
and entering the run test loop.
- :param _pytest.main.Session session: the pytest session object
+ :param _pytest.main.Session session: The pytest session object.
"""
@@ -573,15 +574,15 @@ def pytest_sessionfinish(
) -> None:
"""Called after whole test run finished, right before returning the exit status to the system.
- :param _pytest.main.Session session: the pytest session object
- :param int exitstatus: the status which pytest will return to the system
+ :param _pytest.main.Session session: The pytest session object.
+ :param int exitstatus: The status which pytest will return to the system.
"""
def pytest_unconfigure(config: "Config") -> None:
"""Called before test process is exited.
- :param _pytest.config.Config config: pytest config object
+ :param _pytest.config.Config config: The pytest config object.
"""
@@ -596,22 +597,19 @@ def pytest_assertrepr_compare(
"""Return explanation for comparisons in failing assert expressions.
Return None for no custom explanation, otherwise return a list
- of strings. The strings will be joined by newlines but any newlines
- *in* a string will be escaped. Note that all but the first line will
+ of strings. The strings will be joined by newlines but any newlines
+ *in* a string will be escaped. Note that all but the first line will
be indented slightly, the intention is for the first line to be a summary.
- :param _pytest.config.Config config: pytest config object
+ :param _pytest.config.Config config: The pytest config object.
"""
def pytest_assertion_pass(item: "Item", lineno: int, orig: str, expl: str) -> None:
- """
- **(Experimental)**
+ """**(Experimental)** Called whenever an assertion passes.
.. versionadded:: 5.0
- Hook called whenever an assertion *passes*.
-
Use this hook to do some processing after a passing assertion.
The original assertion information is available in the `orig` string
and the pytest introspected assertion information is available in the
@@ -628,32 +626,32 @@ def pytest_assertion_pass(item: "Item", lineno: int, orig: str, expl: str) -> No
You need to **clean the .pyc** files in your project directory and interpreter libraries
when enabling this option, as assertions will require to be re-written.
- :param _pytest.nodes.Item item: pytest item object of current test
- :param int lineno: line number of the assert statement
- :param string orig: string with original assertion
- :param string expl: string with assert explanation
+ :param _pytest.nodes.Item item: pytest item object of current test.
+ :param int lineno: Line number of the assert statement.
+ :param str orig: String with the original assertion.
+ :param str expl: String with the assert explanation.
.. note::
This hook is **experimental**, so its parameters or even the hook itself might
be changed/removed without warning in any future pytest release.
- If you find this hook useful, please share your feedback opening an issue.
+ If you find this hook useful, please share your feedback in an issue.
"""
# -------------------------------------------------------------------------
-# hooks for influencing reporting (invoked from _pytest_terminal)
+# Hooks for influencing reporting (invoked from _pytest_terminal).
# -------------------------------------------------------------------------
def pytest_report_header(
config: "Config", startdir: py.path.local
) -> Union[str, List[str]]:
- """ return a string or list of strings to be displayed as header info for terminal reporting.
+ """Return a string or list of strings to be displayed as header info for terminal reporting.
- :param _pytest.config.Config config: pytest config object
- :param startdir: py.path object with the starting dir
+ :param _pytest.config.Config config: The pytest config object.
+ :param py.path.local startdir: The starting dir.
.. note::
@@ -673,16 +671,16 @@ def pytest_report_header(
def pytest_report_collectionfinish(
config: "Config", startdir: py.path.local, items: Sequence["Item"],
) -> Union[str, List[str]]:
- """
- .. versionadded:: 3.2
-
- Return a string or list of strings to be displayed after collection has finished successfully.
+ """Return a string or list of strings to be displayed after collection
+ has finished successfully.
These strings will be displayed after the standard "collected X items" message.
- :param _pytest.config.Config config: pytest config object
- :param startdir: py.path object with the starting dir
- :param items: list of pytest items that are going to be executed; this list should not be modified.
+ .. versionadded:: 3.2
+
+ :param _pytest.config.Config config: The pytest config object.
+ :param py.path.local startdir: The starting dir.
+ :param items: List of pytest items that are going to be executed; this list should not be modified.
.. note::
@@ -727,9 +725,9 @@ def pytest_terminal_summary(
) -> None:
"""Add a section to terminal summary reporting.
- :param _pytest.terminal.TerminalReporter terminalreporter: the internal terminal reporter object
- :param int exitstatus: the exit status that will be reported back to the OS
- :param _pytest.config.Config config: pytest config object
+ :param _pytest.terminal.TerminalReporter terminalreporter: The internal terminal reporter object.
+ :param int exitstatus: The exit status that will be reported back to the OS.
+ :param _pytest.config.Config config: The pytest config object.
.. versionadded:: 4.2
The ``config`` parameter.
@@ -780,8 +778,7 @@ def pytest_warning_recorded(
nodeid: str,
location: Optional[Tuple[str, int, str]],
) -> None:
- """
- Process a warning captured by the internal pytest warnings plugin.
+ """Process a warning captured by the internal pytest warnings plugin.
:param warnings.WarningMessage warning_message:
The captured warning. This is the same object produced by :py:func:`warnings.catch_warnings`, and contains
@@ -794,7 +791,8 @@ def pytest_warning_recorded(
* ``"collect"``: during test collection.
* ``"runtest"``: during test execution.
- :param str nodeid: full id of the item
+ :param str nodeid:
+ Full id of the item.
:param tuple|None location:
When available, holds information about the execution context of the captured
@@ -823,7 +821,7 @@ def pytest_internalerror(
def pytest_keyboard_interrupt(
excinfo: "ExceptionInfo[Union[KeyboardInterrupt, Exit]]",
) -> None:
- """ called for keyboard interrupt. """
+ """Called for keyboard interrupt."""
def pytest_exception_interact(
@@ -846,20 +844,22 @@ def pytest_exception_interact(
def pytest_enter_pdb(config: "Config", pdb: "pdb.Pdb") -> None:
- """ called upon pdb.set_trace(), can be used by plugins to take special
- action just before the python debugger enters in interactive mode.
+ """Called upon pdb.set_trace().
+
+ Can be used by plugins to take special action just before the python
+ debugger enters interactive mode.
- :param _pytest.config.Config config: pytest config object
- :param pdb.Pdb pdb: Pdb instance
+ :param _pytest.config.Config config: The pytest config object.
+ :param pdb.Pdb pdb: The Pdb instance.
"""
def pytest_leave_pdb(config: "Config", pdb: "pdb.Pdb") -> None:
- """ called when leaving pdb (e.g. with continue after pdb.set_trace()).
+ """Called when leaving pdb (e.g. with continue after pdb.set_trace()).
Can be used by plugins to take special action just after the python
debugger leaves interactive mode.
- :param _pytest.config.Config config: pytest config object
- :param pdb.Pdb pdb: Pdb instance
+ :param _pytest.config.Config config: The pytest config object.
+ :param pdb.Pdb pdb: The Pdb instance.
"""
diff --git a/src/_pytest/junitxml.py b/src/_pytest/junitxml.py
index 28ae69e82..6e3785b7d 100644
--- a/src/_pytest/junitxml.py
+++ b/src/_pytest/junitxml.py
@@ -1,12 +1,10 @@
-"""
- report test results in JUnit-XML format,
- for use with Jenkins and build integration servers.
-
+"""Report test results in JUnit-XML format, for use with Jenkins and build
+integration servers.
Based on initial code from Ross Lawley.
-Output conforms to https://github.com/jenkinsci/xunit-plugin/blob/master/
-src/main/resources/org/jenkinsci/plugins/xunit/types/model/xsd/junit-10.xsd
+Output conforms to
+https://github.com/jenkinsci/xunit-plugin/blob/master/src/main/resources/org/jenkinsci/plugins/xunit/types/model/xsd/junit-10.xsd
"""
import functools
import os
@@ -81,11 +79,11 @@ families = {}
families["_base"] = {"testcase": ["classname", "name"]}
families["_base_legacy"] = {"testcase": ["file", "line", "url"]}
-# xUnit 1.x inherits legacy attributes
+# xUnit 1.x inherits legacy attributes.
families["xunit1"] = families["_base"].copy()
merge_family(families["xunit1"], families["_base_legacy"])
-# xUnit 2.x uses strict base attributes
+# xUnit 2.x uses strict base attributes.
families["xunit2"] = families["_base"]
@@ -111,8 +109,7 @@ class _NodeReporter:
self.attrs[str(name)] = bin_xml_escape(value)
def make_properties_node(self) -> Optional[ET.Element]:
- """Return a Junit node containing custom properties, if any.
- """
+ """Return a Junit node containing custom properties, if any."""
if self.properties:
properties = ET.Element("properties")
for name, value in self.properties:
@@ -136,9 +133,9 @@ class _NodeReporter:
if hasattr(testreport, "url"):
attrs["url"] = testreport.url
self.attrs = attrs
- self.attrs.update(existing_attrs) # restore any user-defined attributes
+ self.attrs.update(existing_attrs) # Restore any user-defined attributes.
- # Preserve legacy testcase behavior
+ # Preserve legacy testcase behavior.
if self.family == "xunit1":
return
@@ -262,7 +259,7 @@ class _NodeReporter:
def _warn_incompatibility_with_xunit2(
request: FixtureRequest, fixture_name: str
) -> None:
- """Emits a PytestWarning about the given fixture being incompatible with newer xunit revisions"""
+ """Emit a PytestWarning about the given fixture being incompatible with newer xunit revisions."""
from _pytest.warning_types import PytestWarning
xml = request.config._store.get(xml_key, None)
@@ -330,7 +327,7 @@ def record_xml_attribute(request: FixtureRequest) -> Callable[[str, object], Non
def _check_record_param_type(param: str, v: str) -> None:
"""Used by record_testsuite_property to check that the given parameter name is of the proper
- type"""
+ type."""
__tracebackhide__ = True
if not isinstance(v, str):
msg = "{param} parameter needs to be a string, but {g} given"
@@ -339,9 +336,10 @@ def _check_record_param_type(param: str, v: str) -> None:
@pytest.fixture(scope="session")
def record_testsuite_property(request: FixtureRequest) -> Callable[[str, object], None]:
- """
- Records a new ``<property>`` tag as child of the root ``<testsuite>``. This is suitable to
- writing global information regarding the entire test suite, and is compatible with ``xunit2`` JUnit family.
+ """Record a new ``<property>`` tag as child of the root ``<testsuite>``.
+
+ This is suitable for writing global information regarding the entire test
+ suite, and is compatible with the ``xunit2`` JUnit family.
This is a ``session``-scoped fixture which is called with ``(name, value)``. Example:
@@ -357,7 +355,7 @@ def record_testsuite_property(request: FixtureRequest) -> Callable[[str, object]
__tracebackhide__ = True
def record_func(name: str, value: object) -> None:
- """noop function in case --junitxml was not passed in the command-line"""
+ """No-op function in case --junitxml was not passed in the command-line."""
__tracebackhide__ = True
_check_record_param_type("name", name)
@@ -414,7 +412,7 @@ def pytest_addoption(parser: Parser) -> None:
def pytest_configure(config: Config) -> None:
xmlpath = config.option.xmlpath
- # prevent opening xmllog on worker nodes (xdist)
+ # Prevent opening xmllog on worker nodes (xdist).
if xmlpath and not hasattr(config, "workerinput"):
junit_family = config.getini("junit_family")
if not junit_family:
@@ -446,10 +444,10 @@ def mangle_test_address(address: str) -> List[str]:
names.remove("()")
except ValueError:
pass
- # convert file path to dotted path
+ # Convert file path to dotted path.
names[0] = names[0].replace(nodes.SEP, ".")
names[0] = re.sub(r"\.py$", "", names[0])
- # put any params back
+ # Put any params back.
names[-1] += possible_open_bracket + params
return names
@@ -486,13 +484,13 @@ class LogXML:
self.open_reports = [] # type: List[TestReport]
self.cnt_double_fail_tests = 0
- # Replaces convenience family with real family
+ # Replaces convenience family with real family.
if self.family == "legacy":
self.family = "xunit1"
def finalize(self, report: TestReport) -> None:
nodeid = getattr(report, "nodeid", report)
- # local hack to handle xdist report order
+ # Local hack to handle xdist report order.
workernode = getattr(report, "node", None)
reporter = self.node_reporters.pop((nodeid, workernode))
if reporter is not None:
@@ -500,7 +498,7 @@ class LogXML:
def node_reporter(self, report: Union[TestReport, str]) -> _NodeReporter:
nodeid = getattr(report, "nodeid", report) # type: Union[str, TestReport]
- # local hack to handle xdist report order
+ # Local hack to handle xdist report order.
workernode = getattr(report, "node", None)
key = nodeid, workernode
@@ -526,13 +524,13 @@ class LogXML:
return reporter
def pytest_runtest_logreport(self, report: TestReport) -> None:
- """handle a setup/call/teardown report, generating the appropriate
- xml tags as necessary.
+ """Handle a setup/call/teardown report, generating the appropriate
+ XML tags as necessary.
- note: due to plugins like xdist, this hook may be called in interlaced
- order with reports from other nodes. for example:
+ Note: due to plugins like xdist, this hook may be called in interlaced
+ order with reports from other nodes. For example:
- usual call order:
+ Usual call order:
-> setup node1
-> call node1
-> teardown node1
@@ -540,7 +538,7 @@ class LogXML:
-> call node2
-> teardown node2
- possible call order in xdist:
+ Possible call order in xdist:
-> setup node1
-> call node1
-> setup node2
@@ -555,7 +553,7 @@ class LogXML:
reporter.append_pass(report)
elif report.failed:
if report.when == "teardown":
- # The following vars are needed when xdist plugin is used
+ # The following vars are needed when xdist plugin is used.
report_wid = getattr(report, "worker_id", None)
report_ii = getattr(report, "item_index", None)
close_report = next(
@@ -573,7 +571,7 @@ class LogXML:
if close_report:
# We need to open new testcase in case we have failure in
# call and error in teardown in order to follow junit
- # schema
+ # schema.
self.finalize(close_report)
self.cnt_double_fail_tests += 1
reporter = self._opentestcase(report)
@@ -614,9 +612,8 @@ class LogXML:
self.open_reports.remove(close_report)
def update_testcase_duration(self, report: TestReport) -> None:
- """accumulates total duration for nodeid from given report and updates
- the Junit.testcase with the new total if already created.
- """
+ """Accumulate total duration for nodeid from given report and update
+ the Junit.testcase with the new total if already created."""
if self.report_duration == "total" or report.when == self.report_duration:
reporter = self.node_reporter(report)
reporter.duration += getattr(report, "duration", 0.0)
@@ -684,8 +681,7 @@ class LogXML:
self.global_properties.append((name, bin_xml_escape(value)))
def _get_global_properties_node(self) -> Optional[ET.Element]:
- """Return a Junit node containing custom properties, if any.
- """
+ """Return a Junit node containing custom properties, if any."""
if self.global_properties:
properties = ET.Element("properties")
for name, value in self.global_properties:
diff --git a/src/_pytest/logging.py b/src/_pytest/logging.py
index 0ee9457ea..5dfd47887 100644
--- a/src/_pytest/logging.py
+++ b/src/_pytest/logging.py
@@ -1,4 +1,4 @@
-""" Access and control log capturing. """
+"""Access and control log capturing."""
import logging
import os
import re
@@ -43,9 +43,8 @@ def _remove_ansi_escape_sequences(text: str) -> str:
class ColoredLevelFormatter(logging.Formatter):
- """
- Colorize the %(levelname)..s part of the log format passed to __init__.
- """
+ """A logging formatter which colorizes the %(levelname)..s part of the
+ log format passed to __init__."""
LOGLEVEL_COLOROPTS = {
logging.CRITICAL: {"red"},
@@ -110,7 +109,7 @@ class PercentStyleMultiline(logging.PercentStyle):
@staticmethod
def _get_auto_indent(auto_indent_option: Union[int, str, bool, None]) -> int:
- """Determines the current auto indentation setting
+ """Determine the current auto indentation setting.
Specify auto indent behavior (on/off/fixed) by passing in
extra={"auto_indent": [value]} to the call to logging.log() or
@@ -128,12 +127,14 @@ class PercentStyleMultiline(logging.PercentStyle):
Any other values for the option are invalid, and will silently be
converted to the default.
- :param any auto_indent_option: User specified option for indentation
- from command line, config or extra kwarg. Accepts int, bool or str.
- str option accepts the same range of values as boolean config options,
- as well as positive integers represented in str form.
+ :param None|bool|int|str auto_indent_option:
+ User specified option for indentation from command line, config
+ or extra kwarg. Accepts int, bool or str. str option accepts the
+ same range of values as boolean config options, as well as
+ positive integers represented in str form.
- :returns: indentation value, which can be
+ :returns:
+ Indentation value, which can be
-1 (automatically determine indentation) or
0 (auto-indent turned off) or
>0 (explicitly set indentation position).
@@ -164,7 +165,7 @@ class PercentStyleMultiline(logging.PercentStyle):
def format(self, record: logging.LogRecord) -> str:
if "\n" in record.message:
if hasattr(record, "auto_indent"):
- # passed in from the "extra={}" kwarg on the call to logging.log()
+ # Passed in from the "extra={}" kwarg on the call to logging.log().
auto_indent = self._get_auto_indent(record.auto_indent) # type: ignore[attr-defined]
else:
auto_indent = self._auto_indent
@@ -178,7 +179,7 @@ class PercentStyleMultiline(logging.PercentStyle):
lines[0]
)
else:
- # optimizes logging by allowing a fixed indentation
+ # Optimizes logging by allowing a fixed indentation.
indentation = auto_indent
lines[0] = formatted
return ("\n" + " " * indentation).join(lines)
@@ -316,7 +317,7 @@ class LogCaptureHandler(logging.StreamHandler):
stream = None # type: StringIO
def __init__(self) -> None:
- """Creates a new log handler."""
+ """Create a new log handler."""
super().__init__(StringIO())
self.records = [] # type: List[logging.LogRecord]
@@ -342,18 +343,17 @@ class LogCaptureFixture:
"""Provides access and control of log capturing."""
def __init__(self, item: nodes.Node) -> None:
- """Creates a new funcarg."""
self._item = item
- # dict of log name -> log level
self._initial_handler_level = None # type: Optional[int]
+ # Dict of log name -> log level.
self._initial_logger_levels = {} # type: Dict[Optional[str], int]
def _finalize(self) -> None:
- """Finalizes the fixture.
+ """Finalize the fixture.
This restores the log levels changed by :meth:`set_level`.
"""
- # restore log levels
+ # Restore log levels.
if self._initial_handler_level is not None:
self.handler.setLevel(self._initial_handler_level)
for logger_name, level in self._initial_logger_levels.items():
@@ -362,20 +362,20 @@ class LogCaptureFixture:
@property
def handler(self) -> LogCaptureHandler:
- """
+ """Get the logging handler used by the fixture.
+
:rtype: LogCaptureHandler
"""
return self._item._store[caplog_handler_key]
def get_records(self, when: str) -> List[logging.LogRecord]:
- """
- Get the logging records for one of the possible test phases.
+ """Get the logging records for one of the possible test phases.
:param str when:
Which test phase to obtain the records from. Valid values are: "setup", "call" and "teardown".
+ :returns: The list of captured records at the given stage.
:rtype: List[logging.LogRecord]
- :return: the list of captured records at the given stage
.. versionadded:: 3.4
"""
@@ -383,17 +383,17 @@ class LogCaptureFixture:
@property
def text(self) -> str:
- """Returns the formatted log text."""
+ """The formatted log text."""
return _remove_ansi_escape_sequences(self.handler.stream.getvalue())
@property
def records(self) -> List[logging.LogRecord]:
- """Returns the list of log records."""
+ """The list of log records."""
return self.handler.records
@property
def record_tuples(self) -> List[Tuple[str, int, str]]:
- """Returns a list of a stripped down version of log records intended
+ """A list of a stripped down version of log records intended
for use in assertion comparison.
The format of the tuple is:
@@ -404,15 +404,18 @@ class LogCaptureFixture:
@property
def messages(self) -> List[str]:
- """Returns a list of format-interpolated log messages.
+ """A list of format-interpolated log messages.
+
+ Unlike 'records', which contains the format string and parameters for
+ interpolation, log messages in this list are all interpolated.
- Unlike 'records', which contains the format string and parameters for interpolation, log messages in this list
- are all interpolated.
- Unlike 'text', which contains the output from the handler, log messages in this list are unadorned with
- levels, timestamps, etc, making exact comparisons more reliable.
+ Unlike 'text', which contains the output from the handler, log
+ messages in this list are unadorned with levels, timestamps, etc,
+ making exact comparisons more reliable.
- Note that traceback or stack info (from :func:`logging.exception` or the `exc_info` or `stack_info` arguments
- to the logging functions) is not included, as this is added by the formatter in the handler.
+ Note that traceback or stack info (from :func:`logging.exception` or
+ the `exc_info` or `stack_info` arguments to the logging functions) is
+ not included, as this is added by the formatter in the handler.
.. versionadded:: 3.7
"""
@@ -423,18 +426,17 @@ class LogCaptureFixture:
self.handler.reset()
def set_level(self, level: Union[int, str], logger: Optional[str] = None) -> None:
- """Sets the level for capturing of logs. The level will be restored to its previous value at the end of
- the test.
-
- :param int level: the logger to level.
- :param str logger: the logger to update the level. If not given, the root logger level is updated.
+ """Set the level of a logger for the duration of a test.
.. versionchanged:: 3.4
- The levels of the loggers changed by this function will be restored to their initial values at the
- end of the test.
+ The levels of the loggers changed by this function will be
+ restored to their initial values at the end of the test.
+
+ :param int level: The level.
+ :param str logger: The logger to update. If not given, the root logger.
"""
logger_obj = logging.getLogger(logger)
- # save the original log-level to restore it during teardown
+ # Save the original log-level to restore it during teardown.
self._initial_logger_levels.setdefault(logger, logger_obj.level)
logger_obj.setLevel(level)
self._initial_handler_level = self.handler.level
@@ -444,11 +446,12 @@ class LogCaptureFixture:
def at_level(
self, level: int, logger: Optional[str] = None
) -> Generator[None, None, None]:
- """Context manager that sets the level for capturing of logs. After the end of the 'with' statement the
- level is restored to its original value.
+ """Context manager that sets the level for capturing of logs. After
+ the end of the 'with' statement the level is restored to its original
+ value.
- :param int level: the logger to level.
- :param str logger: the logger to update the level. If not given, the root logger level is updated.
+ :param int level: The level.
+ :param str logger: The logger to update. If not given, the root logger.
"""
logger_obj = logging.getLogger(logger)
orig_level = logger_obj.level
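
For reference, a hedged usage sketch of the two level-setting APIs documented above; the logger name "app.io" is made up for illustration:

    import logging

    def test_capture_levels(caplog):
        # Lower the capture threshold for the whole test; restored at teardown.
        caplog.set_level(logging.DEBUG)
        # Or scope the change to a block and a specific logger.
        with caplog.at_level(logging.INFO, logger="app.io"):
            logging.getLogger("app.io").info("opened file")
        assert "opened file" in caplog.text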
@@ -509,11 +512,10 @@ def pytest_configure(config: Config) -> None:
class LoggingPlugin:
- """Attaches to the logging module and captures log messages for each test.
- """
+ """Attaches to the logging module and captures log messages for each test."""
def __init__(self, config: Config) -> None:
- """Creates a new plugin to capture log messages.
+ """Create a new plugin to capture log messages.
The formatter can be safely shared across all handlers so
create a single one for the entire test session here.
@@ -572,7 +574,7 @@ class LoggingPlugin:
self.log_cli_handler.setFormatter(log_cli_formatter)
def _create_formatter(self, log_format, log_date_format, auto_indent):
- # color option doesn't exist if terminal plugin is disabled
+ # Color option doesn't exist if terminal plugin is disabled.
color = getattr(self._config.option, "color", "no")
if color != "no" and ColoredLevelFormatter.LEVELNAME_FMT_REGEX.search(
log_format
@@ -590,12 +592,12 @@ class LoggingPlugin:
return formatter
def set_log_path(self, fname: str) -> None:
- """Public method, which can set filename parameter for
- Logging.FileHandler(). Also creates parent directory if
- it does not exist.
+ """Set the filename parameter for Logging.FileHandler().
+
+ Creates parent directory if it does not exist.
.. warning::
- Please considered as an experimental API.
+ This is an experimental API.
"""
fpath = Path(fname)
@@ -652,19 +654,17 @@ class LoggingPlugin:
@pytest.hookimpl(hookwrapper=True)
def pytest_runtestloop(self, session: Session) -> Generator[None, None, None]:
- """Runs all collected test items."""
-
if session.config.option.collectonly:
yield
return
if self._log_cli_enabled() and self._config.getoption("verbose") < 1:
- # setting verbose flag is needed to avoid messy test progress output
+ # The verbose flag is needed to avoid messy test progress output.
self._config.option.verbose = 1
with catching_logs(self.log_cli_handler, level=self.log_cli_level):
with catching_logs(self.log_file_handler, level=self.log_file_level):
- yield # run all the tests
+ yield # Run all the tests.
@pytest.hookimpl
def pytest_runtest_logstart(self) -> None:
@@ -676,7 +676,7 @@ class LoggingPlugin:
self.log_cli_handler.set_when("logreport")
def _runtest_for(self, item: nodes.Item, when: str) -> Generator[None, None, None]:
- """Implements the internals of pytest_runtest_xxx() hook."""
+ """Implement the internals of the pytest_runtest_xxx() hooks."""
with catching_logs(
self.caplog_handler, level=self.log_level,
) as caplog_handler, catching_logs(
@@ -734,9 +734,7 @@ class LoggingPlugin:
class _FileHandler(logging.FileHandler):
- """
- Custom FileHandler with pytest tweaks.
- """
+ """A logging FileHandler with pytest tweaks."""
def handleError(self, record: logging.LogRecord) -> None:
# Handled by LogCaptureHandler.
@@ -744,12 +742,12 @@ class _FileHandler(logging.FileHandler):
class _LiveLoggingStreamHandler(logging.StreamHandler):
- """
- Custom StreamHandler used by the live logging feature: it will write a newline before the first log message
- in each test.
+ """A logging StreamHandler used by the live logging feature: it will
+ write a newline before the first log message in each test.
- During live logging we must also explicitly disable stdout/stderr capturing otherwise it will get captured
- and won't appear in the terminal.
+ During live logging we must also explicitly disable stdout/stderr
+ capturing otherwise it will get captured and won't appear in the
+ terminal.
"""
# Officially stream needs to be a IO[str], but TerminalReporter
@@ -761,10 +759,6 @@ class _LiveLoggingStreamHandler(logging.StreamHandler):
terminal_reporter: TerminalReporter,
capture_manager: Optional[CaptureManager],
) -> None:
- """
- :param _pytest.terminal.TerminalReporter terminal_reporter:
- :param _pytest.capture.CaptureManager capture_manager:
- """
logging.StreamHandler.__init__(self, stream=terminal_reporter) # type: ignore[arg-type]
self.capture_manager = capture_manager
self.reset()
@@ -772,11 +766,11 @@ class _LiveLoggingStreamHandler(logging.StreamHandler):
self._test_outcome_written = False
def reset(self) -> None:
- """Reset the handler; should be called before the start of each test"""
+ """Reset the handler; should be called before the start of each test."""
self._first_record_emitted = False
def set_when(self, when: Optional[str]) -> None:
- """Prepares for the given test phase (setup/call/teardown)"""
+ """Prepare for the given test phase (setup/call/teardown)."""
self._when = when
self._section_name_shown = False
if when == "start":
@@ -807,7 +801,7 @@ class _LiveLoggingStreamHandler(logging.StreamHandler):
class _LiveLoggingNullHandler(logging.NullHandler):
- """A handler used when live logging is disabled."""
+ """A logging handler used when live logging is disabled."""
def reset(self) -> None:
pass
diff --git a/src/_pytest/main.py b/src/_pytest/main.py
index 969988305..292ba58e2 100644
--- a/src/_pytest/main.py
+++ b/src/_pytest/main.py
@@ -1,4 +1,4 @@
-""" core implementation of testing process: init, session, runtest loop. """
+"""Core implementation of the testing process: init, session, runtest loop."""
import argparse
import fnmatch
import functools
@@ -206,7 +206,7 @@ def validate_basetemp(path: str) -> str:
raise argparse.ArgumentTypeError(msg)
def is_ancestor(base: Path, query: Path) -> bool:
- """ return True if query is an ancestor of base, else False."""
+ """Return whether query is an ancestor of base."""
if base == query:
return True
for parent in base.parents:
@@ -228,7 +228,7 @@ def validate_basetemp(path: str) -> str:
def wrap_session(
config: Config, doit: Callable[[Config, "Session"], Optional[Union[int, ExitCode]]]
) -> Union[int, ExitCode]:
- """Skeleton command line program"""
+ """Skeleton command line program."""
session = Session.from_config(config)
session.exitstatus = ExitCode.OK
initstate = 0
@@ -291,8 +291,8 @@ def pytest_cmdline_main(config: Config) -> Union[int, ExitCode]:
def _main(config: Config, session: "Session") -> Optional[Union[int, ExitCode]]:
- """ default command line protocol for initialization, session,
- running tests and reporting. """
+ """Default command line protocol for initialization, session,
+ running tests and reporting."""
config.hook.pytest_collection(session=session)
config.hook.pytest_runtestloop(session=session)
@@ -328,8 +328,8 @@ def pytest_runtestloop(session: "Session") -> bool:
def _in_venv(path: py.path.local) -> bool:
- """Attempts to detect if ``path`` is the root of a Virtual Environment by
- checking for the existence of the appropriate activate script"""
+ """Attempt to detect if ``path`` is the root of a Virtual Environment by
+ checking for the existence of the appropriate activate script."""
bindir = path.join("Scripts" if sys.platform.startswith("win") else "bin")
if not bindir.isdir():
return False
@@ -390,17 +390,17 @@ def pytest_collection_modifyitems(items: List[nodes.Item], config: Config) -> No
class NoMatch(Exception):
- """ raised if matching cannot locate a matching names. """
+ """Matching cannot locate matching names."""
class Interrupted(KeyboardInterrupt):
- """ signals an interrupted test run. """
+ """Signals that the test run was interrupted."""
- __module__ = "builtins" # for py3
+ __module__ = "builtins" # For py3.
class Failed(Exception):
- """ signals a stop as failed test run. """
+ """Signals a stop as failed test run."""
@attr.s
@@ -434,7 +434,7 @@ class Session(nodes.FSCollector):
self.startdir = config.invocation_dir
self._initialpaths = frozenset() # type: FrozenSet[py.path.local]
- # Keep track of any collected nodes in here, so we don't duplicate fixtures
+ # Keep track of any collected nodes in here, so we don't duplicate fixtures.
self._collection_node_cache1 = (
{}
) # type: Dict[py.path.local, Sequence[nodes.Collector]]
@@ -469,7 +469,7 @@ class Session(nodes.FSCollector):
)
def _node_location_to_relpath(self, node_path: py.path.local) -> str:
- # bestrelpath is a quite slow function
+ # bestrelpath is a quite slow function.
return self._bestrelpathcache[node_path]
@hookimpl(tryfirst=True)
@@ -594,7 +594,7 @@ class Session(nodes.FSCollector):
# Start with a Session root, and delve to argpath item (dir or file)
# and stack all Packages found on the way.
- # No point in finding packages when collecting doctests
+ # No point in finding packages when collecting doctests.
if not self.config.getoption("doctestmodules", False):
pm = self.config.pluginmanager
for parent in reversed(argpath.parts()):
@@ -609,7 +609,7 @@ class Session(nodes.FSCollector):
if col:
if isinstance(col[0], Package):
self._collection_pkg_roots[str(parent)] = col[0]
- # always store a list in the cache, matchnodes expects it
+ # Always store a list in the cache, matchnodes expects it.
self._collection_node_cache1[col[0].fspath] = [col[0]]
# If it's a directory argument, recurse and look for any Subpackages.
@@ -689,7 +689,7 @@ class Session(nodes.FSCollector):
return spec.origin
def _parsearg(self, arg: str) -> Tuple[py.path.local, List[str]]:
- """ return (fspath, names) tuple after checking the file exists. """
+ """Return (fspath, names) tuple after checking the file exists."""
strpath, *parts = str(arg).split("::")
if self.config.option.pyargs:
strpath = self._tryconvertpyarg(strpath)
@@ -740,18 +740,18 @@ class Session(nodes.FSCollector):
if rep.passed:
has_matched = False
for x in rep.result:
- # TODO: remove parametrized workaround once collection structure contains parametrization
+ # TODO: Remove parametrized workaround once collection structure contains parametrization.
if x.name == name or x.name.split("[")[0] == name:
resultnodes.extend(self.matchnodes([x], nextnames))
has_matched = True
- # XXX accept IDs that don't have "()" for class instances
+ # XXX Accept IDs that don't have "()" for class instances.
if not has_matched and len(rep.result) == 1 and x.name == "()":
nextnames.insert(0, name)
resultnodes.extend(self.matchnodes([x], nextnames))
else:
- # report collection failures here to avoid failing to run some test
+ # Report collection failures here to avoid failing to run some test
# specified in the command line because the module could not be
- # imported (#134)
+ # imported (#134).
node.ihook.pytest_collectreport(report=rep)
return resultnodes
diff --git a/src/_pytest/mark/__init__.py b/src/_pytest/mark/__init__.py
index bc1dd1a70..d677d49c1 100644
--- a/src/_pytest/mark/__init__.py
+++ b/src/_pytest/mark/__init__.py
@@ -1,4 +1,4 @@
-""" generic mechanism for marking and selecting python functions. """
+"""Generic mechanism for marking and selecting python functions."""
import typing
from typing import AbstractSet
from typing import List
@@ -58,9 +58,9 @@ def param(
def test_eval(test_input, expected):
assert eval(test_input) == expected
- :param values: variable args of the values of the parameter set, in order.
- :keyword marks: a single mark or a list of marks to be applied to this parameter set.
- :keyword str id: the id to attribute to this parameter set.
+ :param values: Variable args of the values of the parameter set, in order.
+ :keyword marks: A single mark or a list of marks to be applied to this parameter set.
+ :keyword str id: The id to attribute to this parameter set.
"""
return ParameterSet.param(*values, marks=marks, id=id)
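
A short usage sketch complementing the docstring above; the test body mirrors the docstring's own example, and the id and marks values are illustrative:

    import pytest

    @pytest.mark.parametrize(
        "test_input,expected",
        [
            ("3+5", 8),
            pytest.param("6*9", 42, marks=pytest.mark.xfail, id="the-answer"),
        ],
    )
    def test_eval(test_input, expected):
        assert eval(test_input) == expected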
@@ -148,22 +148,22 @@ class KeywordMatcher:
def from_item(cls, item: "Item") -> "KeywordMatcher":
mapped_names = set()
- # Add the names of the current item and any parent items
+ # Add the names of the current item and any parent items.
import pytest
for node in item.listchain():
if not isinstance(node, (pytest.Instance, pytest.Session)):
mapped_names.add(node.name)
- # Add the names added as extra keywords to current or parent items
+ # Add the names added as extra keywords to current or parent items.
mapped_names.update(item.listextrakeywords())
- # Add the names attached to the current function through direct assignment
+ # Add the names attached to the current function through direct assignment.
function_obj = getattr(item, "function", None)
if function_obj:
mapped_names.update(function_obj.__dict__)
- # add the markers to the keywords as we no longer handle them correctly
+ # Add the markers to the keywords as we no longer handle them correctly.
mapped_names.update(mark.name for mark in item.iter_markers())
return cls(mapped_names)
diff --git a/src/_pytest/mark/expression.py b/src/_pytest/mark/expression.py
index 73b7bf169..f57001097 100644
--- a/src/_pytest/mark/expression.py
+++ b/src/_pytest/mark/expression.py
@@ -1,5 +1,4 @@
-r"""
-Evaluate match expressions, as used by `-k` and `-m`.
+r"""Evaluate match expressions, as used by `-k` and `-m`.
The grammar is:
@@ -213,10 +212,11 @@ class Expression:
def evaluate(self, matcher: Callable[[str], bool]) -> bool:
"""Evaluate the match expression.
- :param matcher: Given an identifier, should return whether it matches or not.
- Should be prepared to handle arbitrary strings as input.
+ :param matcher:
+ Given an identifier, should return whether it matches or not.
+ Should be prepared to handle arbitrary strings as input.
- Returns whether the expression matches or not.
+ :returns: Whether the expression matches or not.
"""
ret = eval(
self.code, {"__builtins__": {}}, MatcherAdapter(matcher)
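
A hedged sketch of driving the expression object directly; it assumes the module's ``Expression.compile`` constructor and is internal API, so treat it as illustrative only:

    from _pytest.mark.expression import Expression

    expr = Expression.compile("foo and not bar")
    # The matcher decides whether a bare identifier matches; here, set membership.
    assert expr.evaluate(lambda name: name in {"foo"})
    assert not expr.evaluate(lambda name: name in {"foo", "bar"})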
diff --git a/src/_pytest/mark/structures.py b/src/_pytest/mark/structures.py
index 656782299..5abe4b945 100644
--- a/src/_pytest/mark/structures.py
+++ b/src/_pytest/mark/structures.py
@@ -107,14 +107,15 @@ class ParameterSet(
parameterset: Union["ParameterSet", Sequence[object], object],
force_tuple: bool = False,
) -> "ParameterSet":
- """
+ """Extract from an object or objects.
+
:param parameterset:
- a legacy style parameterset that may or may not be a tuple,
- and may or may not be wrapped into a mess of mark objects
+ A legacy style parameterset that may or may not be a tuple,
+ and may or may not be wrapped into a mess of mark objects.
:param force_tuple:
- enforce tuple wrapping so single argument tuple values
- don't get decomposed and break tests
+ Enforce tuple wrapping so single argument tuple values
+ don't get decomposed and break tests.
"""
if isinstance(parameterset, cls):
@@ -166,7 +167,7 @@ class ParameterSet(
del argvalues
if parameters:
- # check all parameter sets have the correct number of values
+ # Check all parameter sets have the correct number of values.
for param in parameters:
if len(param.values) != len(argnames):
msg = (
@@ -186,8 +187,8 @@ class ParameterSet(
pytrace=False,
)
else:
- # empty parameter set (likely computed at runtime): create a single
- # parameter set with NOTSET values, with the "empty parameter set" mark applied to it
+ # Empty parameter set (likely computed at runtime): create a single
+ # parameter set with NOTSET values, with the "empty parameter set" mark applied to it.
mark = get_empty_parameterset_mark(config, argnames, func)
parameters.append(
ParameterSet(values=(NOTSET,) * len(argnames), marks=[mark], id=None)
@@ -220,8 +221,7 @@ class Mark:
Combines by appending args and merging kwargs.
- :param other: The mark to combine with.
- :type other: Mark
+ :param Mark other: The mark to combine with.
:rtype: Mark
"""
assert self.name == other.name
@@ -314,7 +314,7 @@ class MarkDecorator:
Unlike calling the MarkDecorator, with_args() can be used even
if the sole argument is a callable/class.
- :return: MarkDecorator
+ :rtype: MarkDecorator
"""
mark = Mark(self.name, args, kwargs)
return self.__class__(self.mark.combined_with(mark))
@@ -344,9 +344,7 @@ class MarkDecorator:
def get_unpacked_marks(obj) -> List[Mark]:
- """
- obtain the unpacked marks that are stored on an object
- """
+ """Obtain the unpacked marks that are stored on an object."""
mark_list = getattr(obj, "pytestmark", [])
if not isinstance(mark_list, list):
mark_list = [mark_list]
@@ -354,10 +352,9 @@ def get_unpacked_marks(obj) -> List[Mark]:
def normalize_mark_list(mark_list: Iterable[Union[Mark, MarkDecorator]]) -> List[Mark]:
- """
- normalizes marker decorating helpers to mark objects
+ """Normalize marker decorating helpers to mark objects.
- :type mark_list: List[Union[Mark, Markdecorator]]
+ :type mark_list: List[Union[Mark, MarkDecorator]]
:rtype: List[Mark]
"""
extracted = [
diff --git a/src/_pytest/monkeypatch.py b/src/_pytest/monkeypatch.py
index 2e5cca526..19208ac66 100644
--- a/src/_pytest/monkeypatch.py
+++ b/src/_pytest/monkeypatch.py
@@ -1,4 +1,4 @@
-""" monkeypatching and mocking functionality. """
+"""Monkeypatching and mocking functionality."""
import os
import re
import sys
@@ -27,8 +27,10 @@ V = TypeVar("V")
@fixture
def monkeypatch() -> Generator["MonkeyPatch", None, None]:
- """The returned ``monkeypatch`` fixture provides these
- helper methods to modify objects, dictionaries or os.environ::
+ """A convenient fixture for monkey-patching.
+
+ The fixture provides these methods to modify objects, dictionaries or
+ os.environ::
monkeypatch.setattr(obj, name, value, raising=True)
monkeypatch.delattr(obj, name, raising=True)
@@ -39,10 +41,9 @@ def monkeypatch() -> Generator["MonkeyPatch", None, None]:
monkeypatch.syspath_prepend(path)
monkeypatch.chdir(path)
- All modifications will be undone after the requesting
- test function or fixture has finished. The ``raising``
- parameter determines if a KeyError or AttributeError
- will be raised if the set/deletion operation has no target.
+ All modifications will be undone after the requesting test function or
+ fixture has finished. The ``raising`` parameter determines if a KeyError
+ or AttributeError will be raised if the set/deletion operation has no target.
"""
mpatch = MonkeyPatch()
yield mpatch
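
A minimal usage sketch for the fixture described above (the patched targets are arbitrary examples):

    import os

    def test_fake_cwd(monkeypatch, tmp_path):
        # Both changes are undone automatically when the test finishes.
        monkeypatch.setenv("HOME", str(tmp_path))
        monkeypatch.setattr("os.getcwd", lambda: "/")
        assert os.getcwd() == "/"
        assert os.environ["HOME"] == str(tmp_path)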
@@ -50,7 +51,7 @@ def monkeypatch() -> Generator["MonkeyPatch", None, None]:
def resolve(name: str) -> object:
- # simplified from zope.dottedname
+ # Simplified from zope.dottedname.
parts = name.split(".")
used = parts.pop(0)
@@ -63,12 +64,11 @@ def resolve(name: str) -> object:
pass
else:
continue
- # we use explicit un-nesting of the handling block in order
- # to avoid nested exceptions on python 3
+ # We use explicit un-nesting of the handling block in order
+ # to avoid nested exceptions.
try:
__import__(used)
except ImportError as ex:
- # str is used for py2 vs py3
expected = str(ex).split()[-1]
if expected == used:
raise
@@ -111,8 +111,8 @@ notset = Notset()
class MonkeyPatch:
- """ Object returned by the ``monkeypatch`` fixture keeping a record of setattr/item/env/syspath changes.
- """
+ """Object returned by the ``monkeypatch`` fixture keeping a record of
+ setattr/item/env/syspath changes."""
def __init__(self) -> None:
self._setattr = [] # type: List[Tuple[object, str, object]]
@@ -124,9 +124,10 @@ class MonkeyPatch:
@contextmanager
def context(self) -> Generator["MonkeyPatch", None, None]:
- """
- Context manager that returns a new :class:`MonkeyPatch` object which
- undoes any patching done inside the ``with`` block upon exit:
+ """Context manager that returns a new :class:`MonkeyPatch` object
+ which undoes any patching done inside the ``with`` block upon exit.
+
+ Example:
.. code-block:: python
@@ -166,18 +167,16 @@ class MonkeyPatch:
value: object = notset,
raising: bool = True,
) -> None:
- """ Set attribute value on target, memorizing the old value.
- By default raise AttributeError if the attribute did not exist.
+ """Set attribute value on target, memorizing the old value.
For convenience you can specify a string as ``target`` which
will be interpreted as a dotted import path, with the last part
- being the attribute name. Example:
+ being the attribute name. For example,
``monkeypatch.setattr("os.getcwd", lambda: "/")``
would set the ``getcwd`` function of the ``os`` module.
- The ``raising`` value determines if the setattr should fail
- if the attribute is not already present (defaults to True
- which means it will raise).
+ Raises AttributeError if the attribute does not exist, unless
+ ``raising`` is set to False.
"""
__tracebackhide__ = True
import inspect
@@ -215,15 +214,14 @@ class MonkeyPatch:
name: Union[str, Notset] = notset,
raising: bool = True,
) -> None:
- """ Delete attribute ``name`` from ``target``, by default raise
- AttributeError it the attribute did not previously exist.
+ """Delete attribute ``name`` from ``target``.
If no ``name`` is specified and ``target`` is a string
it will be interpreted as a dotted import path with the
last part being the attribute name.
- If ``raising`` is set to False, no exception will be raised if the
- attribute is missing.
+ Raises AttributeError if the attribute does not exist, unless
+ ``raising`` is set to False.
"""
__tracebackhide__ = True
import inspect
@@ -249,15 +247,15 @@ class MonkeyPatch:
delattr(target, name)
def setitem(self, dic: MutableMapping[K, V], name: K, value: V) -> None:
- """ Set dictionary entry ``name`` to value. """
+ """Set dictionary entry ``name`` to value."""
self._setitem.append((dic, name, dic.get(name, notset)))
dic[name] = value
def delitem(self, dic: MutableMapping[K, V], name: K, raising: bool = True) -> None:
- """ Delete ``name`` from dict. Raise KeyError if it doesn't exist.
+ """Delete ``name`` from dict.
- If ``raising`` is set to False, no exception will be raised if the
- key is missing.
+ Raises ``KeyError`` if it doesn't exist, unless ``raising`` is set to
+ False.
"""
if name not in dic:
if raising:
@@ -267,9 +265,12 @@ class MonkeyPatch:
del dic[name]
def setenv(self, name: str, value: str, prepend: Optional[str] = None) -> None:
- """ Set environment variable ``name`` to ``value``. If ``prepend``
- is a character, read the current environment variable value
- and prepend the ``value`` adjoined with the ``prepend`` character."""
+ """Set environment variable ``name`` to ``value``.
+
+ If ``prepend`` is a character, read the current environment variable
+ value and prepend the ``value`` adjoined with the ``prepend``
+ character.
+ """
if not isinstance(value, str):
warnings.warn(
pytest.PytestWarning(
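
A hedged example of the ``prepend`` behaviour documented above, using the platform path separator as the joining character:

    import os

    def test_prepend_path(monkeypatch, tmp_path):
        # The new value is placed in front of the existing PATH, joined by os.pathsep.
        monkeypatch.setenv("PATH", str(tmp_path), prepend=os.pathsep)
        assert os.environ["PATH"].startswith(str(tmp_path))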
@@ -286,17 +287,16 @@ class MonkeyPatch:
self.setitem(os.environ, name, value)
def delenv(self, name: str, raising: bool = True) -> None:
- """ Delete ``name`` from the environment. Raise KeyError if it does
- not exist.
+ """Delete ``name`` from the environment.
- If ``raising`` is set to False, no exception will be raised if the
- environment variable is missing.
+ Raises ``KeyError`` if it does not exist, unless ``raising`` is set to
+ False.
"""
environ = os.environ # type: MutableMapping[str, str]
self.delitem(environ, name, raising=raising)
def syspath_prepend(self, path) -> None:
- """ Prepend ``path`` to ``sys.path`` list of import locations. """
+ """Prepend ``path`` to ``sys.path`` list of import locations."""
from pkg_resources import fixup_namespace_packages
if self._savesyspath is None:
@@ -318,7 +318,8 @@ class MonkeyPatch:
invalidate_caches()
def chdir(self, path) -> None:
- """ Change the current working directory to the specified path.
+ """Change the current working directory to the specified path.
+
Path can be a string or a py.path.local object.
"""
if self._cwd is None:
@@ -326,15 +327,16 @@ class MonkeyPatch:
if hasattr(path, "chdir"):
path.chdir()
elif isinstance(path, Path):
- # modern python uses the fspath protocol here LEGACY
+ # Modern Python uses the fspath protocol here (LEGACY).
os.chdir(str(path))
else:
os.chdir(path)
def undo(self) -> None:
- """ Undo previous changes. This call consumes the
- undo stack. Calling it a second time has no effect unless
- you do more monkeypatching after the undo call.
+ """Undo previous changes.
+
+ This call consumes the undo stack. Calling it a second time has no
+ effect unless you do more monkeypatching after the undo call.
There is generally no need to call `undo()`, since it is
called automatically during tear-down.
@@ -356,7 +358,7 @@ class MonkeyPatch:
try:
del dictionary[key]
except KeyError:
- pass # was already deleted, so we have the desired state
+ pass # Was already deleted, so we have the desired state.
else:
dictionary[key] = value
self._setitem[:] = []
diff --git a/src/_pytest/nodes.py b/src/_pytest/nodes.py
index d53d591e7..cc1cc7ebd 100644
--- a/src/_pytest/nodes.py
+++ b/src/_pytest/nodes.py
@@ -66,19 +66,23 @@ def _splitnode(nodeid: str) -> Tuple[str, ...]:
['testing', 'code', 'test_excinfo.py', 'TestFormattedExcinfo']
"""
if nodeid == "":
- # If there is no root node at all, return an empty list so the caller's logic can remain sane
+ # If there is no root node at all, return an empty list so the caller's
+ # logic can remain sane.
return ()
parts = nodeid.split(SEP)
- # Replace single last element 'test_foo.py::Bar' with multiple elements 'test_foo.py', 'Bar'
+ # Replace single last element 'test_foo.py::Bar' with multiple elements
+ # 'test_foo.py', 'Bar'.
parts[-1:] = parts[-1].split("::")
- # Convert parts into a tuple to avoid possible errors with caching of a mutable type
+ # Convert parts into a tuple to avoid possible errors with caching of a
+ # mutable type.
return tuple(parts)
def ischildnode(baseid: str, nodeid: str) -> bool:
"""Return True if the nodeid is a child node of the baseid.
- E.g. 'foo/bar::Baz' is a child of 'foo', 'foo/bar' and 'foo/bar::Baz', but not of 'foo/blorp'
+ E.g. 'foo/bar::Baz' is a child of 'foo', 'foo/bar' and 'foo/bar::Baz',
+ but not of 'foo/blorp'.
"""
base_parts = _splitnode(baseid)
node_parts = _splitnode(nodeid)
@@ -100,8 +104,11 @@ class NodeMeta(type):
class Node(metaclass=NodeMeta):
- """ base class for Collector and Item the test collection tree.
- Collector subclasses have children, Items are terminal nodes."""
+ """Base class for Collector and Item, the components of the test
+ collection tree.
+
+ Collector subclasses have children; Items are leaf nodes.
+ """
# Use __slots__ to make attribute access faster.
# Note that __dict__ is still available.
@@ -125,13 +132,13 @@ class Node(metaclass=NodeMeta):
fspath: Optional[py.path.local] = None,
nodeid: Optional[str] = None,
) -> None:
- #: a unique name within the scope of the parent node
+ #: A unique name within the scope of the parent node.
self.name = name
- #: the parent collector node.
+ #: The parent collector node.
self.parent = parent
- #: the pytest config object
+ #: The pytest config object.
if config:
self.config = config # type: Config
else:
@@ -139,7 +146,7 @@ class Node(metaclass=NodeMeta):
raise TypeError("config or parent must be provided")
self.config = parent.config
- #: the session this node is part of
+ #: The pytest session this node is part of.
if session:
self.session = session
else:
@@ -147,19 +154,19 @@ class Node(metaclass=NodeMeta):
raise TypeError("session or parent must be provided")
self.session = parent.session
- #: filesystem path where this node was collected from (can be None)
+ #: Filesystem path where this node was collected from (can be None).
self.fspath = fspath or getattr(parent, "fspath", None)
- #: keywords/markers collected from all scopes
+ #: Keywords/markers collected from all scopes.
self.keywords = NodeKeywords(self)
- #: the marker objects belonging to this node
+ #: The marker objects belonging to this node.
self.own_markers = [] # type: List[Mark]
- #: allow adding of extra keywords to use for matching
+ #: Allow adding of extra keywords to use for matching.
self.extra_keyword_matches = set() # type: Set[str]
- # used for storing artificial fixturedefs for direct parametrization
+ # Used for storing artificial fixturedefs for direct parametrization.
self._name2pseudofixturedef = {} # type: Dict[str, FixtureDef]
if nodeid is not None:
@@ -178,15 +185,15 @@ class Node(metaclass=NodeMeta):
@classmethod
def from_parent(cls, parent: "Node", **kw):
- """
- Public Constructor for Nodes
+ """Public constructor for Nodes.
This indirection got introduced in order to enable removing
the fragile logic from the node constructors.
- Subclasses can use ``super().from_parent(...)`` when overriding the construction
+ Subclasses can use ``super().from_parent(...)`` when overriding the
+ construction.
- :param parent: the parent node of this test Node
+ :param parent: The parent node of this Node.
"""
if "config" in kw:
raise TypeError("config is not a valid argument for from_parent")
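
As a hedged illustration of this constructor indirection, a skeletal custom collector; the class names and the collected item are invented for the example:

    # conftest.py -- skeletal custom collection, names invented for the example.
    import pytest

    def pytest_collect_file(parent, path):
        if path.ext == ".yaml" and path.basename.startswith("test"):
            return YamlFile.from_parent(parent, fspath=path)

    class YamlFile(pytest.File):
        def collect(self):
            # Children are always created through from_parent, never __init__.
            yield YamlItem.from_parent(self, name="smoke")

    class YamlItem(pytest.Item):
        def runtest(self):
            pass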
@@ -196,27 +203,27 @@ class Node(metaclass=NodeMeta):
@property
def ihook(self):
- """ fspath sensitive hook proxy used to call pytest hooks"""
+ """fspath-sensitive hook proxy used to call pytest hooks."""
return self.session.gethookproxy(self.fspath)
def __repr__(self) -> str:
return "<{} {}>".format(self.__class__.__name__, getattr(self, "name", None))
def warn(self, warning: "PytestWarning") -> None:
- """Issue a warning for this item.
+ """Issue a warning for this Node.
- Warnings will be displayed after the test session, unless explicitly suppressed
+ Warnings will be displayed after the test session, unless explicitly suppressed.
- :param Warning warning: the warning instance to issue. Must be a subclass of PytestWarning.
+ :param Warning warning:
+ The warning instance to issue. Must be a subclass of PytestWarning.
- :raise ValueError: if ``warning`` instance is not a subclass of PytestWarning.
+ :raises ValueError: If ``warning`` instance is not a subclass of PytestWarning.
Example usage:
.. code-block:: python
node.warn(PytestWarning("some message"))
-
"""
from _pytest.warning_types import PytestWarning
@@ -232,10 +239,11 @@ class Node(metaclass=NodeMeta):
warning, category=None, filename=str(path), lineno=lineno + 1,
)
- # methods for ordering nodes
+ # Methods for ordering nodes.
+
@property
def nodeid(self) -> str:
- """ a ::-separated string denoting its collection tree address. """
+ """A ::-separated string denoting its collection tree address."""
return self._nodeid
def __hash__(self) -> int:
@@ -248,8 +256,8 @@ class Node(metaclass=NodeMeta):
pass
def listchain(self) -> List["Node"]:
- """ return list of all parent collectors up to self,
- starting from root of collection tree. """
+ """Return list of all parent collectors up to self, starting from
+ the root of the collection tree."""
chain = []
item = self # type: Optional[Node]
while item is not None:
@@ -261,12 +269,10 @@ class Node(metaclass=NodeMeta):
def add_marker(
self, marker: Union[str, MarkDecorator], append: bool = True
) -> None:
- """dynamically add a marker object to the node.
+ """Dynamically add a marker object to the node.
- :type marker: ``str`` or ``pytest.mark.*`` object
- :param marker:
- ``append=True`` whether to append the marker,
- if ``False`` insert at position ``0``.
+ :param append:
+ Whether to append the marker, or prepend it.
"""
from _pytest.mark import MARK_GEN
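
A common hedged usage pattern for ``add_marker`` in a hook; the "slow" marker is illustrative and would normally be registered in the ini file:

    # conftest.py
    import pytest

    def pytest_collection_modifyitems(items):
        for item in items:
            if "slow" in item.nodeid:
                # Dynamically mark matching tests; appended after existing marks.
                item.add_marker(pytest.mark.slow)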
@@ -283,21 +289,19 @@ class Node(metaclass=NodeMeta):
self.own_markers.insert(0, marker_.mark)
def iter_markers(self, name: Optional[str] = None) -> Iterator[Mark]:
- """
- :param name: if given, filter the results by the name attribute
+ """Iterate over all markers of the node.
- iterate over all markers of the node
+ :param name: If given, filter the results by the name attribute.
"""
return (x[1] for x in self.iter_markers_with_node(name=name))
def iter_markers_with_node(
self, name: Optional[str] = None
) -> Iterator[Tuple["Node", Mark]]:
- """
- :param name: if given, filter the results by the name attribute
+ """Iterate over all markers of the node.
- iterate over all markers of the node
- returns sequence of tuples (node, mark)
+ :param name: If given, filter the results by the name attribute.
+ :returns: An iterator of (node, mark) tuples.
"""
for node in reversed(self.listchain()):
for mark in node.own_markers:
@@ -315,16 +319,16 @@ class Node(metaclass=NodeMeta):
def get_closest_marker( # noqa: F811
self, name: str, default: Optional[Mark] = None
) -> Optional[Mark]:
- """return the first marker matching the name, from closest (for example function) to farther level (for example
- module level).
+ """Return the first marker matching the name, from closest (for
+ example function) to farther level (for example module level).
- :param default: fallback return value of no marker was found
- :param name: name to filter by
+ :param default: Fallback return value if no marker was found.
+ :param name: Name to filter by.
"""
return next(self.iter_markers(name=name), default)
def listextrakeywords(self) -> Set[str]:
- """ Return a set of all extra keywords in self and any parents."""
+ """Return a set of all extra keywords in self and any parents."""
extra_keywords = set() # type: Set[str]
for item in self.listchain():
extra_keywords.update(item.extra_keyword_matches)
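
A hedged example of ``get_closest_marker`` in a hook; the "timeout" marker name and its argument are assumptions made for illustration:

    # conftest.py
    def pytest_runtest_setup(item):
        marker = item.get_closest_marker("timeout")
        if marker is not None:
            # The closest marker wins, e.g. a function-level mark overrides a
            # module-level one.
            seconds = marker.args[0] if marker.args else 30
            item.user_properties.append(("timeout", seconds))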
@@ -334,7 +338,7 @@ class Node(metaclass=NodeMeta):
return [x.name for x in self.listchain()]
def addfinalizer(self, fin: Callable[[], object]) -> None:
- """ register a function to be called when this node is finalized.
+ """Register a function to be called when this node is finalized.
This method can only be called when this node is active
in a setup chain, for example during self.setup().
@@ -342,8 +346,8 @@ class Node(metaclass=NodeMeta):
self.session._setupstate.addfinalizer(fin, self)
def getparent(self, cls: "Type[_NodeType]") -> Optional[_NodeType]:
- """ get the next parent node (including ourself)
- which is an instance of the given class"""
+ """Get the next parent node (including self) which is an instance of
+ the given class."""
current = self # type: Optional[Node]
while current and not isinstance(current, cls):
current = current.parent
@@ -411,8 +415,7 @@ class Node(metaclass=NodeMeta):
excinfo: ExceptionInfo[BaseException],
style: "Optional[_TracebackStyle]" = None,
) -> Union[str, TerminalRepr]:
- """
- Return a representation of a collection or test failure.
+ """Return a representation of a collection or test failure.
:param excinfo: Exception information for the failure.
"""
@@ -422,13 +425,13 @@ class Node(metaclass=NodeMeta):
def get_fslocation_from_item(
node: "Node",
) -> Tuple[Union[str, py.path.local], Optional[int]]:
- """Tries to extract the actual location from a node, depending on available attributes:
+ """Try to extract the actual location from a node, depending on available attributes:
* "location": a pair (path, lineno)
* "obj": a Python object that the node wraps.
* "fspath": just a path
- :rtype: a tuple of (str|LocalPath, int) with filename and line number.
+ :rtype: A tuple of (str|py.path.local, int) with filename and line number.
"""
# See Item.location.
location = getattr(
@@ -443,25 +446,22 @@ def get_fslocation_from_item(
class Collector(Node):
- """ Collector instances create children through collect()
- and thus iteratively build a tree.
- """
+ """Collector instances create children through collect() and thus
+ iteratively build a tree."""
class CollectError(Exception):
- """ an error during collection, contains a custom message. """
+ """An error during collection, contains a custom message."""
def collect(self) -> Iterable[Union["Item", "Collector"]]:
- """ returns a list of children (items and collectors)
- for this collection node.
- """
+ """Return a list of children (items and collectors) for this
+ collection node."""
raise NotImplementedError("abstract")
# TODO: This omits the style= parameter which breaks Liskov Substitution.
def repr_failure( # type: ignore[override]
self, excinfo: ExceptionInfo[BaseException]
) -> Union[str, TerminalRepr]:
- """
- Return a representation of a collection failure.
+ """Return a representation of a collection failure.
:param excinfo: Exception information for the failure.
"""
@@ -538,24 +538,22 @@ class FSCollector(Collector):
@classmethod
def from_parent(cls, parent, *, fspath, **kw):
- """
- The public constructor
- """
+ """The public constructor."""
return super().from_parent(parent=parent, fspath=fspath, **kw)
def _gethookproxy(self, fspath: py.path.local):
- # check if we have the common case of running
- # hooks with all conftest.py files
+ # Check if we have the common case of running
+ # hooks with all conftest.py files.
pm = self.config.pluginmanager
my_conftestmodules = pm._getconftestmodules(
fspath, self.config.getoption("importmode")
)
remove_mods = pm._conftest_plugins.difference(my_conftestmodules)
if remove_mods:
- # one or more conftests are not in use at this fspath
+ # One or more conftests are not in use at this fspath.
proxy = FSHookProxy(pm, remove_mods)
else:
- # all plugins are active for this fspath
+ # All plugins are active for this fspath.
proxy = self.config.hook
return proxy
@@ -605,12 +603,13 @@ class FSCollector(Collector):
class File(FSCollector):
- """ base class for collecting tests from a file. """
+ """Base class for collecting tests from a file."""
class Item(Node):
- """ a basic test invocation item. Note that for a single function
- there might be multiple test invocation items.
+ """A basic test invocation item.
+
+ Note that for a single function there might be multiple test invocation items.
"""
nextitem = None
@@ -626,17 +625,16 @@ class Item(Node):
super().__init__(name, parent, config, session, nodeid=nodeid)
self._report_sections = [] # type: List[Tuple[str, str, str]]
- #: user properties is a list of tuples (name, value) that holds user
- #: defined properties for this test.
+ #: A list of tuples (name, value) that holds user defined properties
+ #: for this test.
self.user_properties = [] # type: List[Tuple[str, object]]
def runtest(self) -> None:
raise NotImplementedError("runtest must be implemented by Item subclass")
def add_report_section(self, when: str, key: str, content: str) -> None:
- """
- Adds a new report section, similar to what's done internally to add stdout and
- stderr captured output::
+ """Add a new report section, similar to what's done internally to add
+ stdout and stderr captured output::
item.add_report_section("call", "stdout", "report section contents")
@@ -645,7 +643,6 @@ class Item(Node):
:param str key:
Name of the section, can be customized at will. Pytest uses ``"stdout"`` and
``"stderr"`` internally.
-
:param str content:
The full contents as a string.
"""
diff --git a/src/_pytest/nose.py b/src/_pytest/nose.py
index 8bdc310ac..bb8f99772 100644
--- a/src/_pytest/nose.py
+++ b/src/_pytest/nose.py
@@ -1,4 +1,4 @@
-""" run test suites written for nose. """
+"""Run testsuites written for nose."""
from _pytest import python
from _pytest import unittest
from _pytest.config import hookimpl
@@ -9,9 +9,9 @@ from _pytest.nodes import Item
def pytest_runtest_setup(item):
if is_potential_nosetest(item):
if not call_optional(item.obj, "setup"):
- # call module level setup if there is no object level one
+ # Call module level setup if there is no object level one.
call_optional(item.parent.obj, "setup")
- # XXX this implies we only call teardown when setup worked
+ # XXX This implies we only call teardown when setup worked.
item.session._setupstate.addfinalizer((lambda: teardown_nose(item)), item)
@@ -22,8 +22,8 @@ def teardown_nose(item):
def is_potential_nosetest(item: Item) -> bool:
- # extra check needed since we do not do nose style setup/teardown
- # on direct unittest style classes
+ # Extra check needed since we do not do nose style setup/teardown
+ # on direct unittest style classes.
return isinstance(item, python.Function) and not isinstance(
item, unittest.TestCaseFunction
)
@@ -34,6 +34,6 @@ def call_optional(obj, name):
isfixture = hasattr(method, "_pytestfixturefunction")
if method is not None and not isfixture and callable(method):
# If there's any problems allow the exception to raise rather than
- # silently ignoring them
+ # silently ignoring them.
method()
return True
diff --git a/src/_pytest/outcomes.py b/src/_pytest/outcomes.py
index 751cf9474..f083689ed 100644
--- a/src/_pytest/outcomes.py
+++ b/src/_pytest/outcomes.py
@@ -1,7 +1,5 @@
-"""
-exception classes and constants handling test outcomes
-as well as functions creating them
-"""
+"""Exception classes and constants handling test outcomes as well as
+functions creating them."""
import sys
from typing import Any
from typing import Callable
@@ -9,7 +7,7 @@ from typing import cast
from typing import Optional
from typing import TypeVar
-TYPE_CHECKING = False # avoid circular import through compat
+TYPE_CHECKING = False # Avoid circular import through compat.
if TYPE_CHECKING:
from typing import NoReturn
@@ -25,9 +23,8 @@ else:
class OutcomeException(BaseException):
- """ OutcomeException and its subclass instances indicate and
- contain info about test and collection outcomes.
- """
+ """OutcomeException and its subclass instances indicate and contain info
+ about test and collection outcomes."""
def __init__(self, msg: Optional[str] = None, pytrace: bool = True) -> None:
if msg is not None and not isinstance(msg, str):
@@ -67,13 +64,13 @@ class Skipped(OutcomeException):
class Failed(OutcomeException):
- """ raised from an explicit call to pytest.fail() """
+ """Raised from an explicit call to pytest.fail()."""
__module__ = "builtins"
class Exit(Exception):
- """ raised for immediate program exits (no tracebacks/summaries)"""
+ """Raised for immediate program exits (no tracebacks/summaries)."""
def __init__(
self, msg: str = "unknown reason", returncode: Optional[int] = None
@@ -104,16 +101,15 @@ def _with_exception(exception_type: _ET) -> Callable[[_F], _WithException[_F, _E
return decorate
-# exposed helper methods
+# Exposed helper methods.
@_with_exception(Exit)
def exit(msg: str, returncode: Optional[int] = None) -> "NoReturn":
- """
- Exit testing process.
+ """Exit testing process.
- :param str msg: message to display upon exit.
- :param int returncode: return code to be used when exiting pytest.
+ :param str msg: Message to display upon exit.
+ :param int returncode: Return code to be used when exiting pytest.
"""
__tracebackhide__ = True
raise Exit(msg, returncode)
@@ -121,20 +117,20 @@ def exit(msg: str, returncode: Optional[int] = None) -> "NoReturn":
@_with_exception(Skipped)
def skip(msg: str = "", *, allow_module_level: bool = False) -> "NoReturn":
- """
- Skip an executing test with the given message.
+ """Skip an executing test with the given message.
This function should be called only during testing (setup, call or teardown) or
during collection by using the ``allow_module_level`` flag. This function can
be called in doctests as well.
- :kwarg bool allow_module_level: allows this function to be called at
- module level, skipping the rest of the module. Default to False.
+ :param bool allow_module_level:
+ Allows this function to be called at module level, skipping the rest
+ of the module. Defaults to False.
.. note::
- It is better to use the :ref:`pytest.mark.skipif ref` marker when possible to declare a test to be
- skipped under certain conditions like mismatching platforms or
- dependencies.
+ It is better to use the :ref:`pytest.mark.skipif ref` marker when
+ possible to declare a test to be skipped under certain conditions
+ like mismatching platforms or dependencies.
Similarly, use the ``# doctest: +SKIP`` directive (see `doctest.SKIP
<https://docs.python.org/3/library/doctest.html#doctest.SKIP>`_)
to skip a doctest statically.
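
A hedged module-level usage sketch for the ``allow_module_level`` flag described above:

    import sys
    import pytest

    if not sys.platform.startswith("linux"):
        # Without allow_module_level=True this call would error out here.
        pytest.skip("tests require Linux-only syscalls", allow_module_level=True)

    def test_epoll():
        import select
        assert hasattr(select, "epoll")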
@@ -145,11 +141,12 @@ def skip(msg: str = "", *, allow_module_level: bool = False) -> "NoReturn":
@_with_exception(Failed)
def fail(msg: str = "", pytrace: bool = True) -> "NoReturn":
- """
- Explicitly fail an executing test with the given message.
+ """Explicitly fail an executing test with the given message.
- :param str msg: the message to show the user as reason for the failure.
- :param bool pytrace: if false the msg represents the full failure information and no
+ :param str msg:
+ The message to show the user as reason for the failure.
+ :param bool pytrace:
+ If False, msg represents the full failure information and no
python traceback will be reported.
"""
__tracebackhide__ = True
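
A short hedged example of ``pytrace=False``; the configuration check is invented for illustration:

    import os
    import pytest

    def test_database_configured():
        if "DATABASE_URL" not in os.environ:
            # Only the message is reported, without a Python traceback.
            pytest.fail("DATABASE_URL is not set; see the project docs for setup", pytrace=False)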
@@ -157,19 +154,19 @@ def fail(msg: str = "", pytrace: bool = True) -> "NoReturn":
class XFailed(Failed):
- """ raised from an explicit call to pytest.xfail() """
+ """Raised from an explicit call to pytest.xfail()."""
@_with_exception(XFailed)
def xfail(reason: str = "") -> "NoReturn":
- """
- Imperatively xfail an executing test or setup functions with the given reason.
+ """Imperatively xfail an executing test or setup function with the given reason.
This function should be called only during testing (setup, call or teardown).
.. note::
- It is better to use the :ref:`pytest.mark.xfail ref` marker when possible to declare a test to be
- xfailed under certain conditions like known bugs or missing features.
+ It is better to use the :ref:`pytest.mark.xfail ref` marker when
+ possible to declare a test to be xfailed under certain conditions
+ like known bugs or missing features.
"""
__tracebackhide__ = True
raise XFailed(reason)
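
A hedged example of the imperative form; the ``backend`` fixture is hypothetical:

    import pytest

    def test_window_functions(backend):  # 'backend' is a hypothetical fixture
        if backend == "sqlite":
            # Execution stops here and the test is reported as xfailed.
            pytest.xfail("sqlite backend does not support window functions yet")
        assert backend in {"postgres", "mysql"}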
@@ -178,17 +175,20 @@ def xfail(reason: str = "") -> "NoReturn":
def importorskip(
modname: str, minversion: Optional[str] = None, reason: Optional[str] = None
) -> Any:
- """Imports and returns the requested module ``modname``, or skip the
+ """Import and return the requested module ``modname``, or skip the
current test if the module cannot be imported.
- :param str modname: the name of the module to import
- :param str minversion: if given, the imported module's ``__version__``
- attribute must be at least this minimal version, otherwise the test is
- still skipped.
- :param str reason: if given, this reason is shown as the message when the
- module cannot be imported.
- :returns: The imported module. This should be assigned to its canonical
- name.
+ :param str modname:
+ The name of the module to import.
+ :param str minversion:
+ If given, the imported module's ``__version__`` attribute must be at
+ least this minimal version, otherwise the test is still skipped.
+ :param str reason:
+ If given, this reason is shown as the message when the module cannot
+ be imported.
+
+ :returns:
+ The imported module. This should be assigned to its canonical name.
Example::
@@ -200,9 +200,9 @@ def importorskip(
compile(modname, "", "eval") # to catch syntaxerrors
with warnings.catch_warnings():
- # make sure to ignore ImportWarnings that might happen because
+ # Make sure to ignore ImportWarnings that might happen because
# of existing directories with the same name we're trying to
- # import but without a __init__.py file
+ # import but without a __init__.py file.
warnings.simplefilter("ignore")
try:
__import__(modname)
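
A hedged usage sketch at module scope; docutils and the minimum version are illustrative:

    import pytest

    # Skip every test in this module unless a new-enough docutils is importable.
    docutils = pytest.importorskip("docutils", minversion="0.15")

    def test_publish_string():
        assert hasattr(docutils, "core")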
diff --git a/src/_pytest/pastebin.py b/src/_pytest/pastebin.py
index a3432c7a1..0546d2377 100644
--- a/src/_pytest/pastebin.py
+++ b/src/_pytest/pastebin.py
@@ -1,4 +1,4 @@
-""" submit failure or test session information to a pastebin service. """
+"""Submit failure or test session information to a pastebin service."""
import tempfile
from io import StringIO
from typing import IO
@@ -32,11 +32,11 @@ def pytest_addoption(parser: Parser) -> None:
def pytest_configure(config: Config) -> None:
if config.option.pastebin == "all":
tr = config.pluginmanager.getplugin("terminalreporter")
- # if no terminal reporter plugin is present, nothing we can do here;
+ # If no terminal reporter plugin is present, nothing we can do here;
# this can happen when this function executes in a worker node
- # when using pytest-xdist, for example
+ # when using pytest-xdist, for example.
if tr is not None:
- # pastebin file will be utf-8 encoded binary file
+ # The pastebin file will be a UTF-8 encoded binary file.
config._store[pastebinfile_key] = tempfile.TemporaryFile("w+b")
oldwrite = tr._tw.write
@@ -52,26 +52,25 @@ def pytest_configure(config: Config) -> None:
def pytest_unconfigure(config: Config) -> None:
if pastebinfile_key in config._store:
pastebinfile = config._store[pastebinfile_key]
- # get terminal contents and delete file
+ # Get terminal contents and delete file.
pastebinfile.seek(0)
sessionlog = pastebinfile.read()
pastebinfile.close()
del config._store[pastebinfile_key]
- # undo our patching in the terminal reporter
+ # Undo our patching in the terminal reporter.
tr = config.pluginmanager.getplugin("terminalreporter")
del tr._tw.__dict__["write"]
- # write summary
+ # Write summary.
tr.write_sep("=", "Sending information to Paste Service")
pastebinurl = create_new_paste(sessionlog)
tr.write_line("pastebin session-log: %s\n" % pastebinurl)
def create_new_paste(contents: Union[str, bytes]) -> str:
- """
- Creates a new paste using bpaste.net service.
+ """Create a new paste using the bpaste.net service.
- :contents: paste contents string
- :returns: url to the pasted contents or error message
+ :param contents: Paste contents string.
+ :returns: URL to the pasted contents, or an error message.
"""
import re
from urllib.request import urlopen
diff --git a/src/_pytest/pathlib.py b/src/_pytest/pathlib.py
index ba7e9948a..ea263be70 100644
--- a/src/_pytest/pathlib.py
+++ b/src/_pytest/pathlib.py
@@ -49,23 +49,21 @@ def get_lock_path(path: _AnyPurePath) -> _AnyPurePath:
def ensure_reset_dir(path: Path) -> None:
- """
- ensures the given path is an empty directory
- """
+ """Ensure the given path is an empty directory."""
if path.exists():
rm_rf(path)
path.mkdir()
def on_rm_rf_error(func, path: str, exc, *, start_path: Path) -> bool:
- """Handles known read-only errors during rmtree.
+ """Handle known read-only errors during rmtree.
The returned value is used only by our own tests.
"""
exctype, excvalue = exc[:2]
- # another process removed the file in the middle of the "rm_rf" (xdist for example)
- # more context: https://github.com/pytest-dev/pytest/issues/5974#issuecomment-543799018
+ # Another process removed the file in the middle of the "rm_rf" (xdist for example).
+ # More context: https://github.com/pytest-dev/pytest/issues/5974#issuecomment-543799018
if isinstance(excvalue, FileNotFoundError):
return False
@@ -101,7 +99,7 @@ def on_rm_rf_error(func, path: str, exc, *, start_path: Path) -> bool:
if p.is_file():
for parent in p.parents:
chmod_rw(str(parent))
- # stop when we reach the original path passed to rm_rf
+ # Stop when we reach the original path passed to rm_rf.
if parent == start_path:
break
chmod_rw(str(path))
@@ -129,7 +127,7 @@ def ensure_extended_length_path(path: Path) -> Path:
def get_extended_length_path_str(path: str) -> str:
- """Converts to extended length path as a str"""
+ """Convert a path to a Windows extended length path."""
long_path_prefix = "\\\\?\\"
unc_long_path_prefix = "\\\\?\\UNC\\"
if path.startswith((long_path_prefix, unc_long_path_prefix)):
@@ -142,15 +140,14 @@ def get_extended_length_path_str(path: str) -> str:
def rm_rf(path: Path) -> None:
"""Remove the path contents recursively, even if some elements
- are read-only.
- """
+ are read-only."""
path = ensure_extended_length_path(path)
onerror = partial(on_rm_rf_error, start_path=path)
shutil.rmtree(str(path), onerror=onerror)
def find_prefixed(root: Path, prefix: str) -> Iterator[Path]:
- """finds all elements in root that begin with the prefix, case insensitive"""
+ """Find all elements in root that begin with the prefix, case insensitive."""
l_prefix = prefix.lower()
for x in root.iterdir():
if x.name.lower().startswith(l_prefix):
@@ -158,10 +155,10 @@ def find_prefixed(root: Path, prefix: str) -> Iterator[Path]:
def extract_suffixes(iter: Iterable[PurePath], prefix: str) -> Iterator[str]:
- """
- :param iter: iterator over path names
- :param prefix: expected prefix of the path names
- :returns: the parts of the paths following the prefix
+ """Return the parts of the paths following the prefix.
+
+ :param iter: Iterator over path names.
+ :param prefix: Expected prefix of the path names.
"""
p_len = len(prefix)
for p in iter:
@@ -169,13 +166,12 @@ def extract_suffixes(iter: Iterable[PurePath], prefix: str) -> Iterator[str]:
def find_suffixes(root: Path, prefix: str) -> Iterator[str]:
- """combines find_prefixes and extract_suffixes
- """
+ """Combine find_prefixes and extract_suffixes."""
return extract_suffixes(find_prefixed(root, prefix), prefix)
def parse_num(maybe_num) -> int:
- """parses number path suffixes, returns -1 on error"""
+ """Parse number path suffixes, returns -1 on error."""
try:
return int(maybe_num)
except ValueError:
@@ -185,13 +181,13 @@ def parse_num(maybe_num) -> int:
def _force_symlink(
root: Path, target: Union[str, PurePath], link_to: Union[str, Path]
) -> None:
- """helper to create the current symlink
+ """Helper to create the current symlink.
- it's full of race conditions that are reasonably ok to ignore
- for the context of best effort linking to the latest test run
+ It's full of race conditions that are reasonably OK to ignore
+ for the context of best effort linking to the latest test run.
- the presumption being that in case of much parallelism
- the inaccuracy is going to be acceptable
+ The presumption is that in case of much parallelism
+ the inaccuracy is going to be acceptable.
"""
current_symlink = root.joinpath(target)
try:
@@ -205,7 +201,7 @@ def _force_symlink(
def make_numbered_dir(root: Path, prefix: str) -> Path:
- """create a directory with an increased number as suffix for the given prefix"""
+ """Create a directory with an increased number as suffix for the given prefix."""
for i in range(10):
# try up to 10 times to create the folder
max_existing = max(map(parse_num, find_suffixes(root, prefix)), default=-1)
@@ -226,7 +222,7 @@ def make_numbered_dir(root: Path, prefix: str) -> Path:
def create_cleanup_lock(p: Path) -> Path:
- """crates a lock to prevent premature folder cleanup"""
+ """Create a lock to prevent premature folder cleanup."""
lock_path = get_lock_path(p)
try:
fd = os.open(str(lock_path), os.O_WRONLY | os.O_CREAT | os.O_EXCL, 0o644)
@@ -243,7 +239,7 @@ def create_cleanup_lock(p: Path) -> Path:
def register_cleanup_lock_removal(lock_path: Path, register=atexit.register):
- """registers a cleanup function for removing a lock, by default on atexit"""
+ """Register a cleanup function for removing a lock, by default on atexit."""
pid = os.getpid()
def cleanup_on_exit(lock_path: Path = lock_path, original_pid: int = pid) -> None:
@@ -260,7 +256,8 @@ def register_cleanup_lock_removal(lock_path: Path, register=atexit.register):
def maybe_delete_a_numbered_dir(path: Path) -> None:
- """removes a numbered directory if its lock can be obtained and it does not seem to be in use"""
+ """Remove a numbered directory if its lock can be obtained and it does
+ not seem to be in use."""
path = ensure_extended_length_path(path)
lock_path = None
try:
@@ -277,8 +274,8 @@ def maybe_delete_a_numbered_dir(path: Path) -> None:
# * process cwd (Windows)
return
finally:
- # if we created the lock, ensure we remove it even if we failed
- # to properly remove the numbered dir
+ # If we created the lock, ensure we remove it even if we failed
+ # to properly remove the numbered dir.
if lock_path is not None:
try:
lock_path.unlink()
@@ -287,7 +284,7 @@ def maybe_delete_a_numbered_dir(path: Path) -> None:
def ensure_deletable(path: Path, consider_lock_dead_if_created_before: float) -> bool:
- """checks if `path` is deletable based on whether the lock file is expired"""
+ """Check if `path` is deletable based on whether the lock file is expired."""
if path.is_symlink():
return False
lock = get_lock_path(path)
@@ -304,9 +301,9 @@ def ensure_deletable(path: Path, consider_lock_dead_if_created_before: float) ->
return False
else:
if lock_time < consider_lock_dead_if_created_before:
- # wa want to ignore any errors while trying to remove the lock such as:
- # - PermissionDenied, like the file permissions have changed since the lock creation
- # - FileNotFoundError, in case another pytest process got here first.
+ # We want to ignore any errors while trying to remove the lock, such as:
+ # - PermissionDenied, e.g. the file permissions have changed since the lock creation;
+ # - FileNotFoundError, in case another pytest process got here first;
# and any other cause of failure.
with contextlib.suppress(OSError):
lock.unlink()
@@ -315,13 +312,13 @@ def ensure_deletable(path: Path, consider_lock_dead_if_created_before: float) ->
def try_cleanup(path: Path, consider_lock_dead_if_created_before: float) -> None:
- """tries to cleanup a folder if we can ensure it's deletable"""
+ """Try to cleanup a folder if we can ensure it's deletable."""
if ensure_deletable(path, consider_lock_dead_if_created_before):
maybe_delete_a_numbered_dir(path)
def cleanup_candidates(root: Path, prefix: str, keep: int) -> Iterator[Path]:
- """lists candidates for numbered directories to be removed - follows py.path"""
+ """List candidates for numbered directories to be removed - follows py.path."""
max_existing = max(map(parse_num, find_suffixes(root, prefix)), default=-1)
max_delete = max_existing - keep
paths = find_prefixed(root, prefix)
@@ -335,7 +332,7 @@ def cleanup_candidates(root: Path, prefix: str, keep: int) -> Iterator[Path]:
def cleanup_numbered_dir(
root: Path, prefix: str, keep: int, consider_lock_dead_if_created_before: float
) -> None:
- """cleanup for lock driven numbered directories"""
+ """Cleanup for lock driven numbered directories."""
for path in cleanup_candidates(root, prefix, keep):
try_cleanup(path, consider_lock_dead_if_created_before)
for path in root.glob("garbage-*"):
@@ -345,7 +342,7 @@ def cleanup_numbered_dir(
def make_numbered_dir_with_cleanup(
root: Path, prefix: str, keep: int, lock_timeout: float
) -> Path:
- """creates a numbered dir with a cleanup lock and removes old ones"""
+ """Create a numbered dir with a cleanup lock and remove old ones."""
e = None
for i in range(10):
try:
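For orientation, a rough sketch of how these numbered-directory helpers combine. These are pytest internals whose location and signatures may change; the temp root below is made up for illustration:

    import tempfile
    from pathlib import Path

    # Private pytest helpers; not a supported public API.
    from _pytest.pathlib import cleanup_numbered_dir, make_numbered_dir

    root = Path(tempfile.mkdtemp())
    first = make_numbered_dir(root=root, prefix="run-")   # e.g. <root>/run-0
    second = make_numbered_dir(root=root, prefix="run-")  # e.g. <root>/run-1

    # Keep only the newest directory; directories protected by a fresh
    # cleanup lock are left alone.
    cleanup_numbered_dir(
        root=root, prefix="run-", keep=1, consider_lock_dead_if_created_before=0
    )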
@@ -381,17 +378,18 @@ def resolve_from_str(input: str, root: py.path.local) -> Path:
def fnmatch_ex(pattern: str, path) -> bool:
- """FNMatcher port from py.path.common which works with PurePath() instances.
+ """A port of FNMatcher from py.path.common which works with PurePath() instances.
- The difference between this algorithm and PurePath.match() is that the latter matches "**" glob expressions
- for each part of the path, while this algorithm uses the whole path instead.
+ The difference between this algorithm and PurePath.match() is that the
+ latter matches "**" glob expressions for each part of the path, while
+ this algorithm uses the whole path instead.
For example:
- "tests/foo/bar/doc/test_foo.py" matches pattern "tests/**/doc/test*.py" with this algorithm, but not with
- PurePath.match().
+ "tests/foo/bar/doc/test_foo.py" matches pattern "tests/**/doc/test*.py"
+ with this algorithm, but not with PurePath.match().
- This algorithm was ported to keep backward-compatibility with existing settings which assume paths match according
- this logic.
+ This algorithm was ported to keep backward-compatibility with existing
+ settings which assume paths match according to this logic.
References:
* https://bugs.python.org/issue29249
@@ -421,7 +419,7 @@ def parts(s: str) -> Set[str]:
def symlink_or_skip(src, dst, **kwargs):
- """Makes a symlink or skips the test in case symlinks are not supported."""
+ """Make a symlink, or skip the test in case symlinks are not supported."""
try:
os.symlink(str(src), str(dst), **kwargs)
except OSError as e:
@@ -429,7 +427,7 @@ def symlink_or_skip(src, dst, **kwargs):
class ImportMode(Enum):
- """Possible values for `mode` parameter of `import_path`"""
+ """Possible values for `mode` parameter of `import_path`."""
prepend = "prepend"
append = "append"
@@ -450,8 +448,7 @@ def import_path(
*,
mode: Union[str, ImportMode] = ImportMode.prepend
) -> ModuleType:
- """
- Imports and returns a module from the given path, which can be a file (a module) or
+ """Import and return a module from the given path, which can be a file (a module) or
a directory (a package).
The import mechanism used is controlled by the `mode` parameter:
@@ -467,7 +464,8 @@ def import_path(
to import the module, which avoids having to use `__import__` and muck with `sys.path`
at all. It effectively allows having same-named test modules in different places.
- :raise ImportPathMismatchError: if after importing the given `path` and the module `__file__`
+ :raises ImportPathMismatchError:
+ If after importing the given `path`, the module `__file__` and `path`
are different. Only raised in `prepend` and `append` modes.
"""
mode = ImportMode(mode)
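A hedged sketch of calling this helper directly; it is pytest-internal, its exact signature has varied between releases, and the path below is made up:

    from _pytest.pathlib import ImportMode, import_path  # internal API

    # `importlib` mode avoids touching sys.path and allows same-named test
    # modules in different directories.
    mod = import_path("tests/test_example.py", mode=ImportMode.importlib)
    print(mod.__name__)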
@@ -506,7 +504,7 @@ def import_path(
pkg_root = path.parent
module_name = path.stem
- # change sys.path permanently: restoring it at the end of this function would cause surprising
+ # Change sys.path permanently: restoring it at the end of this function would cause surprising
# problems because of delayed imports: for example, a conftest.py file imported by this function
# might have local imports, which would fail at runtime if we restored sys.path.
if mode is ImportMode.append:
@@ -546,7 +544,8 @@ def import_path(
def resolve_package_path(path: Path) -> Optional[Path]:
"""Return the Python package path by looking for the last
directory upwards which still contains an __init__.py.
- Return None if it can not be determined.
+
+ Returns None if it cannot be determined.
"""
result = None
for parent in itertools.chain((path,), path.parents):
diff --git a/src/_pytest/pytester.py b/src/_pytest/pytester.py
index 594abee90..e0e7b1fbc 100644
--- a/src/_pytest/pytester.py
+++ b/src/_pytest/pytester.py
@@ -1,4 +1,4 @@
-"""(disabled by default) support for testing pytest and pytest plugins."""
+"""(Disabled by default) support for testing pytest and pytest plugins."""
import collections.abc
import gc
import importlib
@@ -166,9 +166,7 @@ class LsofFdLeakChecker:
def _pytest(request: FixtureRequest) -> "PytestArg":
"""Return a helper which offers a gethookrecorder(hook) method which
returns a HookRecorder instance which helps to make assertions about called
- hooks.
-
- """
+ hooks."""
return PytestArg(request)
@@ -208,7 +206,6 @@ class HookRecorder:
This wraps all the hook calls in the plugin manager, recording each call
before propagating the normal calls.
-
"""
def __init__(self, pluginmanager: PytestPluginManager) -> None:
@@ -285,7 +282,7 @@ class HookRecorder:
] = "pytest_runtest_logreport pytest_collectreport",
when=None,
):
- """return a testreport whose dotted import path matches"""
+ """Return a testreport whose dotted import path matches."""
values = []
for rep in self.getreports(names=names):
if not when and rep.when != "call" and rep.passed:
@@ -358,17 +355,14 @@ class HookRecorder:
@pytest.fixture
def linecomp() -> "LineComp":
- """
- A :class: `LineComp` instance for checking that an input linearly
- contains a sequence of strings.
- """
+ """A :class: `LineComp` instance for checking that an input linearly
+ contains a sequence of strings."""
return LineComp()
@pytest.fixture(name="LineMatcher")
def LineMatcher_fixture(request: FixtureRequest) -> "Type[LineMatcher]":
- """
- A reference to the :class: `LineMatcher`.
+ """A reference to the :class: `LineMatcher`.
This is instantiable with a list of lines (without their trailing newlines).
This is useful for testing large texts, such as the output of commands.
@@ -378,12 +372,10 @@ def LineMatcher_fixture(request: FixtureRequest) -> "Type[LineMatcher]":
@pytest.fixture
def testdir(request: FixtureRequest, tmpdir_factory: TempdirFactory) -> "Testdir":
- """
- A :class: `TestDir` instance, that can be used to run and test pytest itself.
+ """A :class: `TestDir` instance, that can be used to run and test pytest itself.
It is particularly useful for testing plugins. It is similar to the `tmpdir` fixture
but provides methods which aid in testing pytest itself.
-
"""
return Testdir(request, tmpdir_factory)
@@ -406,9 +398,9 @@ def _config_for_test() -> Generator[Config, None, None]:
config._ensure_unconfigure() # cleanup, e.g. capman closing tmpfiles.
-# regex to match the session duration string in the summary: "74.34s"
+# Regex to match the session duration string in the summary: "74.34s".
rex_session_duration = re.compile(r"\d+\.\d\ds")
-# regex to match all the counts and phrases in the summary line: "34 passed, 111 skipped"
+# Regex to match all the counts and phrases in the summary line: "34 passed, 111 skipped".
rex_outcome = re.compile(r"(\d+) (\w+)")
@@ -424,13 +416,13 @@ class RunResult:
) -> None:
try:
self.ret = pytest.ExitCode(ret) # type: Union[int, ExitCode]
- """the return value"""
+ """The return value."""
except ValueError:
self.ret = ret
self.outlines = outlines
- """list of lines captured from stdout"""
+ """List of lines captured from stdout."""
self.errlines = errlines
- """list of lines captured from stderr"""
+ """List of lines captured from stderr."""
self.stdout = LineMatcher(outlines)
""":class:`LineMatcher` of stdout.
@@ -438,9 +430,9 @@ class RunResult:
:func:`stdout.fnmatch_lines() <LineMatcher.fnmatch_lines()>` method.
"""
self.stderr = LineMatcher(errlines)
- """:class:`LineMatcher` of stderr"""
+ """:class:`LineMatcher` of stderr."""
self.duration = duration
- """duration in seconds"""
+ """Duration in seconds."""
def __repr__(self) -> str:
return (
@@ -456,19 +448,19 @@ class RunResult:
======= 1 failed, 1 passed, 1 warning, 1 error in 0.13s ====
- Will return ``{"failed": 1, "passed": 1, "warnings": 1, "errors": 1}``
+ Will return ``{"failed": 1, "passed": 1, "warnings": 1, "errors": 1}``.
"""
return self.parse_summary_nouns(self.outlines)
@classmethod
def parse_summary_nouns(cls, lines) -> Dict[str, int]:
- """Extracts the nouns from a pytest terminal summary line.
+ """Extract the nouns from a pytest terminal summary line.
It always returns the plural noun for consistency::
======= 1 failed, 1 passed, 1 warning, 1 error in 0.13s ====
- Will return ``{"failed": 1, "passed": 1, "warnings": 1, "errors": 1}``
+ Will return ``{"failed": 1, "passed": 1, "warnings": 1, "errors": 1}``.
"""
for line in reversed(lines):
if rex_session_duration.search(line):
@@ -494,8 +486,7 @@ class RunResult:
xfailed: int = 0,
) -> None:
"""Assert that the specified outcomes appear with the respective
- numbers (0 means it didn't occur) in the text output from a test run.
- """
+ numbers (0 means it didn't occur) in the text output from a test run."""
__tracebackhide__ = True
d = self.parseoutcomes()
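A brief sketch of how these two helpers are typically used from a test that drives pytest through the ``testdir`` fixture (the test body is illustrative only):

    def test_outcomes(testdir):
        testdir.makepyfile(
            """
            def test_one(): pass
            def test_two(): pass
            """
        )
        result = testdir.runpytest()
        assert result.parseoutcomes() == {"passed": 2}
        result.assert_outcomes(passed=2)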
@@ -551,7 +542,7 @@ class SysPathsSnapshot:
class Testdir:
"""Temporary test directory with tools to test/run pytest itself.
- This is based on the ``tmpdir`` fixture but provides a number of methods
+ This is based on the :fixture:`tmpdir` fixture but provides a number of methods
which aid with testing pytest itself. Unless :py:meth:`chdir` is used all
methods will use :py:attr:`tmpdir` as their current working directory.
@@ -559,11 +550,11 @@ class Testdir:
:ivar tmpdir: The :py:class:`py.path.local` instance of the temporary directory.
- :ivar plugins: A list of plugins to use with :py:meth:`parseconfig` and
+ :ivar plugins:
+ A list of plugins to use with :py:meth:`parseconfig` and
:py:meth:`runpytest`. Initially this is an empty list but plugins can
be added to the list. The type of items to add to the list depends on
the method using them so refer to them for details.
-
"""
__test__ = False
@@ -618,7 +609,6 @@ class Testdir:
Some methods modify the global interpreter state and this tries to
clean this up. It does not remove the temporary directory however so
it can be looked at after the test run has finished.
-
"""
self._sys_modules_snapshot.restore()
self._sys_path_snapshot.restore()
@@ -626,9 +616,9 @@ class Testdir:
self.monkeypatch.undo()
def __take_sys_modules_snapshot(self) -> SysModulesSnapshot:
- # some zope modules used by twisted-related tests keep internal state
+ # Some zope modules used by twisted-related tests keep internal state
# and can't be deleted; we had some trouble in the past with
- # `zope.interface` for example
+ # `zope.interface` for example.
def preserve_module(name):
return name.startswith("zope")
@@ -644,7 +634,6 @@ class Testdir:
"""Cd into the temporary directory.
This is done automatically upon instantiation.
-
"""
self.tmpdir.chdir()
@@ -673,12 +662,15 @@ class Testdir:
def makefile(self, ext: str, *args: str, **kwargs):
r"""Create new file(s) in the testdir.
- :param str ext: The extension the file(s) should use, including the dot, e.g. `.py`.
- :param list[str] args: All args will be treated as strings and joined using newlines.
- The result will be written as contents to the file. The name of the
- file will be based on the test function requesting this fixture.
- :param kwargs: Each keyword is the name of a file, while the value of it will
- be written as contents of the file.
+ :param str ext:
+ The extension the file(s) should use, including the dot, e.g. `.py`.
+ :param args:
+ All args are treated as strings and joined using newlines.
+ The result is written as contents to the file. The name of the
+ file is based on the test function requesting this fixture.
+ :param kwargs:
+ Each keyword is the name of a file, and its value will
+ be written as the contents of the file.
Examples:
@@ -713,6 +705,7 @@ class Testdir:
def makepyfile(self, *args, **kwargs):
r"""Shortcut for .makefile() with a .py extension.
+
Defaults to the test name with a '.py' extension, e.g test_foobar.py, overwriting
existing files.
@@ -721,17 +714,18 @@ class Testdir:
.. code-block:: python
def test_something(testdir):
- # initial file is created test_something.py
+ # The initial file is created as test_something.py.
testdir.makepyfile("foobar")
- # to create multiple files, pass kwargs accordingly
+ # To create multiple files, pass kwargs accordingly.
testdir.makepyfile(custom="foobar")
- # at this point, both 'test_something.py' & 'custom.py' exist in the test directory
+ # At this point, both 'test_something.py' & 'custom.py' exist in the test directory.
"""
return self._makefile(".py", args, kwargs)
def maketxtfile(self, *args, **kwargs):
r"""Shortcut for .makefile() with a .txt extension.
+
Defaults to the test name with a '.txt' extension, e.g test_foobar.txt, overwriting
existing files.
@@ -740,11 +734,11 @@ class Testdir:
.. code-block:: python
def test_something(testdir):
- # initial file is created test_something.txt
+ # The initial file is created as test_something.txt.
testdir.maketxtfile("foobar")
- # to create multiple files, pass kwargs accordingly
+ # To create multiple files, pass kwargs accordingly.
testdir.maketxtfile(custom="foobar")
- # at this point, both 'test_something.txt' & 'custom.txt' exist in the test directory
+ # At this point, both 'test_something.txt' & 'custom.txt' exist in the test directory.
"""
return self._makefile(".txt", args, kwargs)
@@ -765,11 +759,10 @@ class Testdir:
return self.tmpdir.mkdir(name)
def mkpydir(self, name) -> py.path.local:
- """Create a new python package.
+ """Create a new Python package.
This creates a (sub)directory with an empty ``__init__.py`` file so it
- gets recognised as a python package.
-
+ gets recognised as a Python package.
"""
p = self.mkdir(name)
p.ensure("__init__.py")
@@ -779,8 +772,7 @@ class Testdir:
"""Copy file from project's directory into the testdir.
:param str name: The name of the file to copy.
- :return: path to the copied directory (inside ``self.tmpdir``).
-
+ :returns: Path to the copied directory (inside ``self.tmpdir``).
"""
import warnings
from _pytest.warning_types import PYTESTER_COPY_EXAMPLE
@@ -830,12 +822,11 @@ class Testdir:
def getnode(self, config: Config, arg):
"""Return the collection node of a file.
- :param config: :py:class:`_pytest.config.Config` instance, see
- :py:meth:`parseconfig` and :py:meth:`parseconfigure` to create the
- configuration
-
- :param arg: a :py:class:`py.path.local` instance of the file
-
+ :param _pytest.config.Config config:
+ A pytest config.
+ See :py:meth:`parseconfig` and :py:meth:`parseconfigure` for creating it.
+ :param py.path.local arg:
+ Path to the file.
"""
session = Session.from_config(config)
assert "::" not in str(arg)
@@ -851,8 +842,7 @@ class Testdir:
This is like :py:meth:`getnode` but uses :py:meth:`parseconfigure` to
create the (configured) pytest Config instance.
- :param path: a :py:class:`py.path.local` instance of the file
-
+ :param py.path.local path: Path to the file.
"""
config = self.parseconfigure(path)
session = Session.from_config(config)
@@ -867,7 +857,6 @@ class Testdir:
This recurses into the collection node and returns a list of all the
test items contained within.
-
"""
session = colitems[0].session
result = [] # type: List[Item]
@@ -882,7 +871,6 @@ class Testdir:
provide a ``.getrunner()`` method which should return a runner which
can run the test protocol for a single item, e.g.
:py:func:`_pytest.runner.runtestprotocol`.
-
"""
# used from runner functional tests
item = self.getitem(source)
@@ -898,12 +886,11 @@ class Testdir:
``pytest.main()`` on it, returning a :py:class:`HookRecorder` instance
for the result.
- :param source: the source code of the test module
+ :param source: The source code of the test module.
- :param cmdlineargs: any extra command line arguments to use
-
- :return: :py:class:`HookRecorder` instance of the result
+ :param cmdlineargs: Any extra command line arguments to use.
+ :returns: :py:class:`HookRecorder` instance of the result.
"""
p = self.makepyfile(source)
values = list(cmdlineargs) + [p]
@@ -915,7 +902,6 @@ class Testdir:
Runs the :py:func:`pytest.main` function to run all of pytest inside
the test process itself like :py:meth:`inline_run`, but returns a
tuple of the collected items and a :py:class:`HookRecorder` instance.
-
"""
rec = self.inline_run("--collect-only", *args)
items = [x.item for x in rec.getcalls("pytest_itemcollected")]
@@ -930,14 +916,15 @@ class Testdir:
from that run than can be done by matching stdout/stderr from
:py:meth:`runpytest`.
- :param args: command line arguments to pass to :py:func:`pytest.main`
-
- :kwarg plugins: extra plugin instances the ``pytest.main()`` instance should use.
-
- :kwarg no_reraise_ctrlc: typically we reraise keyboard interrupts from the child run. If
+ :param args:
+ Command line arguments to pass to :py:func:`pytest.main`.
+ :param plugins:
+ Extra plugin instances the ``pytest.main()`` instance should use.
+ :param no_reraise_ctrlc:
+ Typically we reraise keyboard interrupts from the child run. If
True, the KeyboardInterrupt exception is captured.
- :return: a :py:class:`HookRecorder` instance
+ :returns: A :py:class:`HookRecorder` instance.
"""
# (maybe a cpython bug?) the importlib cache sometimes isn't updated
# properly between file creation and inline_run (especially if imports
@@ -977,8 +964,8 @@ class Testdir:
reprec.ret = ret # type: ignore[attr-defined]
- # typically we reraise keyboard interrupts from the child run
- # because it's our user requesting interruption of the testing
+ # Typically we reraise keyboard interrupts from the child run
+ # because it's our user requesting interruption of the testing.
if ret == ExitCode.INTERRUPTED and not no_reraise_ctrlc:
calls = reprec.getcalls("pytest_keyboard_interrupt")
if calls and calls[-1].excinfo.type == KeyboardInterrupt:
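Taken together, a small sketch of ``inline_run`` in use; it returns a ``HookRecorder`` rather than captured text output (test body illustrative, pytester plugin assumed enabled):

    def test_inline(testdir):
        testdir.makepyfile("def test_ok(): pass")
        rec = testdir.inline_run("-q")
        rec.assertoutcome(passed=1)
        assert rec.ret == 0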
@@ -990,8 +977,7 @@ class Testdir:
def runpytest_inprocess(self, *args, **kwargs) -> RunResult:
"""Return result of running pytest in-process, providing a similar
- interface to what self.runpytest() provides.
- """
+ interface to what self.runpytest() provides."""
syspathinsert = kwargs.pop("syspathinsert", False)
if syspathinsert:
@@ -1032,9 +1018,7 @@ class Testdir:
def runpytest(self, *args, **kwargs) -> RunResult:
"""Run pytest inline or in a subprocess, depending on the command line
- option "--runpytest" and return a :py:class:`RunResult`.
-
- """
+ option "--runpytest" and return a :py:class:`RunResult`."""
args = self._ensure_basetemp(args)
if self._method == "inprocess":
return self.runpytest_inprocess(*args, **kwargs)
@@ -1061,7 +1045,6 @@ class Testdir:
If :py:attr:`plugins` has been populated they should be plugin modules
to be registered with the PluginManager.
-
"""
args = self._ensure_basetemp(args)
@@ -1077,7 +1060,7 @@ class Testdir:
def parseconfigure(self, *args) -> Config:
"""Return a new pytest configured Config instance.
- This returns a new :py:class:`_pytest.config.Config` instance like
+ Returns a new :py:class:`_pytest.config.Config` instance like
:py:meth:`parseconfig`, but also calls the pytest_configure hook.
"""
config = self.parseconfig(*args)
@@ -1087,15 +1070,14 @@ class Testdir:
def getitem(self, source, funcname: str = "test_func") -> Item:
"""Return the test item for a test function.
- This writes the source to a python file and runs pytest's collection on
+ Writes the source to a Python file and runs pytest's collection on
the resulting module, returning the test item for the requested
function name.
- :param source: the module source
-
- :param funcname: the name of the test function for which to return a
- test item
-
+ :param source:
+ The module source.
+ :param funcname:
+ The name of the test function for which to return a test item.
"""
items = self.getitems(source)
for item in items:
@@ -1108,9 +1090,8 @@ class Testdir:
def getitems(self, source) -> List[Item]:
"""Return all test items collected from the module.
- This writes the source to a python file and runs pytest's collection on
+ Writes the source to a Python file and runs pytest's collection on
the resulting module, returning all test items contained within.
-
"""
modcol = self.getmodulecol(source)
return self.genitems([modcol])
@@ -1118,18 +1099,19 @@ class Testdir:
def getmodulecol(self, source, configargs=(), withinit: bool = False):
"""Return the module collection node for ``source``.
- This writes ``source`` to a file using :py:meth:`makepyfile` and then
+ Writes ``source`` to a file using :py:meth:`makepyfile` and then
runs the pytest collection on it, returning the collection node for the
test module.
- :param source: the source code of the module to collect
+ :param source:
+ The source code of the module to collect.
- :param configargs: any extra arguments to pass to
- :py:meth:`parseconfigure`
-
- :param withinit: whether to also write an ``__init__.py`` file to the
- same directory to ensure it is a package
+ :param configargs:
+ Any extra arguments to pass to :py:meth:`parseconfigure`.
+ :param withinit:
+ Whether to also write an ``__init__.py`` file to the same
+ directory to ensure it is a package.
"""
if isinstance(source, Path):
path = self.tmpdir.join(str(source))
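A short sketch of the collection helpers just described (illustrative test body; pytester plugin assumed enabled):

    def test_collection_helpers(testdir):
        items = testdir.getitems(
            """
            def test_a(): pass
            def test_b(): pass
            """
        )
        assert [item.name for item in items] == ["test_a", "test_b"]

        item = testdir.getitem("def test_func(): pass")
        assert item.name == "test_func"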
@@ -1147,12 +1129,11 @@ class Testdir:
) -> Optional[Union[Item, Collector]]:
"""Return the collection node for name from the module collection.
- This will search a module collection node for a collection node
- matching the given name.
-
- :param modcol: a module collection node; see :py:meth:`getmodulecol`
+ Searches a module collection node for a collection node matching the
+ given name.
- :param name: the name of the node to return
+ :param modcol: A module collection node; see :py:meth:`getmodulecol`.
+ :param name: The name of the node to return.
"""
if modcol not in self._mod_collections:
self._mod_collections[modcol] = list(modcol.collect())
@@ -1171,11 +1152,10 @@ class Testdir:
):
"""Invoke subprocess.Popen.
- This calls subprocess.Popen making sure the current working directory
- is in the PYTHONPATH.
+ Calls subprocess.Popen making sure the current working directory is
+ in the PYTHONPATH.
You probably want to use :py:meth:`run` instead.
-
"""
env = os.environ.copy()
env["PYTHONPATH"] = os.pathsep.join(
@@ -1207,16 +1187,18 @@ class Testdir:
Run a process using subprocess.Popen saving the stdout and stderr.
- :param args: the sequence of arguments to pass to `subprocess.Popen()`
- :kwarg timeout: the period in seconds after which to timeout and raise
- :py:class:`Testdir.TimeoutExpired`
- :kwarg stdin: optional standard input. Bytes are being send, closing
+ :param args:
+ The sequence of arguments to pass to `subprocess.Popen()`.
+ :param timeout:
+ The period in seconds after which to timeout and raise
+ :py:class:`Testdir.TimeoutExpired`.
+ :param stdin:
+ Optional standard input. Bytes are being sent, closing
the pipe, otherwise it is passed through to ``popen``.
Defaults to ``CLOSE_STDIN``, which translates to using a pipe
(``subprocess.PIPE``) that gets closed.
- Returns a :py:class:`RunResult`.
-
+ :rtype: RunResult
"""
__tracebackhide__ = True
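A sketch combining ``run`` and ``runpytest_subprocess``; output patterns and timings depend on the environment, so treat this as illustrative only:

    import sys

    def test_subprocesses(testdir):
        result = testdir.run(sys.executable, "-c", "print('hello')")
        assert result.ret == 0
        result.stdout.fnmatch_lines(["hello"])

        result = testdir.runpytest_subprocess("--version", timeout=60)
        assert result.ret == 0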
@@ -1292,13 +1274,15 @@ class Testdir:
def runpython(self, script) -> RunResult:
"""Run a python script using sys.executable as interpreter.
- Returns a :py:class:`RunResult`.
-
+ :rtype: RunResult
"""
return self.run(sys.executable, script)
def runpython_c(self, command):
- """Run python -c "command", return a :py:class:`RunResult`."""
+ """Run python -c "command".
+
+ :rtype: RunResult
+ """
return self.run(sys.executable, "-c", command)
def runpytest_subprocess(self, *args, timeout: Optional[float] = None) -> RunResult:
@@ -1310,11 +1294,13 @@ class Testdir:
with "runpytest-" to not conflict with the normal numbered pytest
location for temporary files and directories.
- :param args: the sequence of arguments to pass to the pytest subprocess
- :param timeout: the period in seconds after which to timeout and raise
- :py:class:`Testdir.TimeoutExpired`
+ :param args:
+ The sequence of arguments to pass to the pytest subprocess.
+ :param timeout:
+ The period in seconds after which to timeout and raise
+ :py:class:`Testdir.TimeoutExpired`.
- Returns a :py:class:`RunResult`.
+ :rtype: RunResult
"""
__tracebackhide__ = True
p = make_numbered_dir(root=Path(str(self.tmpdir)), prefix="runpytest-")
@@ -1334,7 +1320,6 @@ class Testdir:
directory locations.
The pexpect child is returned.
-
"""
basetemp = self.tmpdir.mkdir("temp-pexpect")
invoke = " ".join(map(str, self._getpytestargs()))
@@ -1345,7 +1330,6 @@ class Testdir:
"""Run a command using pexpect.
The pexpect child is returned.
-
"""
pexpect = pytest.importorskip("pexpect", "3.0")
if hasattr(sys, "pypy_version_info") and "64" in platform.machine():
@@ -1400,14 +1384,12 @@ class LineMatcher:
return lines2
def fnmatch_lines_random(self, lines2: Sequence[str]) -> None:
- """Check lines exist in the output in any order (using :func:`python:fnmatch.fnmatch`).
- """
+ """Check lines exist in the output in any order (using :func:`python:fnmatch.fnmatch`)."""
__tracebackhide__ = True
self._match_lines_random(lines2, fnmatch)
def re_match_lines_random(self, lines2: Sequence[str]) -> None:
- """Check lines exist in the output in any order (using :func:`python:re.match`).
- """
+ """Check lines exist in the output in any order (using :func:`python:re.match`)."""
__tracebackhide__ = True
self._match_lines_random(lines2, lambda name, pat: bool(re.match(pat, name)))
@@ -1452,8 +1434,8 @@ class LineMatcher:
wildcards. If they do not match a pytest.fail() is called. The
matches and non-matches are also shown as part of the error message.
- :param lines2: string patterns to match.
- :param consecutive: match lines consecutive?
+ :param lines2: String patterns to match.
+ :param consecutive: Match lines consecutively?
"""
__tracebackhide__ = True
self._match_lines(lines2, fnmatch, "fnmatch", consecutive=consecutive)
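A sketch of the matching helpers in use; patterns are fnmatch-style wildcards and the test body is illustrative:

    def test_output_matching(testdir):
        testdir.makepyfile("def test_ok(): pass")
        result = testdir.runpytest("-v")
        result.stdout.fnmatch_lines(
            [
                "*test_ok PASSED*",
                "*1 passed*",
            ]
        )
        result.stdout.no_fnmatch_line("*failed*")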
@@ -1489,14 +1471,18 @@ class LineMatcher:
) -> None:
"""Underlying implementation of ``fnmatch_lines`` and ``re_match_lines``.
- :param list[str] lines2: list of string patterns to match. The actual
- format depends on ``match_func``
- :param match_func: a callable ``match_func(line, pattern)`` where line
- is the captured line from stdout/stderr and pattern is the matching
- pattern
- :param str match_nickname: the nickname for the match function that
- will be logged to stdout when a match occurs
- :param consecutive: match lines consecutively?
+ :param Sequence[str] lines2:
+ List of string patterns to match. The actual format depends on
+ ``match_func``.
+ :param match_func:
+ A callable ``match_func(line, pattern)`` where line is the
+ captured line from stdout/stderr and pattern is the matching
+ pattern.
+ :param str match_nickname:
+ The nickname for the match function that will be logged to stdout
+ when a match occurs.
+ :param consecutive:
+ Match lines consecutively?
"""
if not isinstance(lines2, collections.abc.Sequence):
raise TypeError("invalid type for lines2: {}".format(type(lines2).__name__))
@@ -1546,7 +1532,7 @@ class LineMatcher:
def no_fnmatch_line(self, pat: str) -> None:
"""Ensure captured lines do not match the given pattern, using ``fnmatch.fnmatch``.
- :param str pat: the pattern to match lines.
+ :param str pat: The pattern to match lines.
"""
__tracebackhide__ = True
self._no_match_line(pat, fnmatch, "fnmatch")
@@ -1554,7 +1540,7 @@ class LineMatcher:
def no_re_match_line(self, pat: str) -> None:
"""Ensure captured lines do not match the given pattern, using ``re.match``.
- :param str pat: the regular expression to match lines.
+ :param str pat: The regular expression to match lines.
"""
__tracebackhide__ = True
self._no_match_line(
@@ -1564,9 +1550,9 @@ class LineMatcher:
def _no_match_line(
self, pat: str, match_func: Callable[[str, str], bool], match_nickname: str
) -> None:
- """Ensure captured lines does not have a the given pattern, using ``fnmatch.fnmatch``
+ """Ensure captured lines does not have a the given pattern, using ``fnmatch.fnmatch``.
- :param str pat: the pattern to match lines
+ :param str pat: The pattern to match lines.
"""
__tracebackhide__ = True
nomatch_printed = False
diff --git a/src/_pytest/python.py b/src/_pytest/python.py
index 50f03eadb..589dfd06e 100644
--- a/src/_pytest/python.py
+++ b/src/_pytest/python.py
@@ -1,4 +1,4 @@
-""" Python test discovery, setup and run of test functions. """
+"""Python test discovery, setup and run of test functions."""
import enum
import fnmatch
import inspect
@@ -201,7 +201,7 @@ def pytest_collect_file(path: py.path.local, parent) -> Optional["Module"]:
def path_matches_patterns(path: py.path.local, patterns: Iterable[str]) -> bool:
- """Returns True if path matches any of the patterns in the list of globs given."""
+ """Return whether path matches any of the patterns in the list of globs given."""
return any(path.fnmatch(pattern) for pattern in patterns)
@@ -215,16 +215,16 @@ def pytest_pycollect_makemodule(path: py.path.local, parent) -> "Module":
@hookimpl(trylast=True)
def pytest_pycollect_makeitem(collector: "PyCollector", name: str, obj: object):
- # nothing was collected elsewhere, let's do it here
+ # Nothing was collected elsewhere, let's do it here.
if safe_isclass(obj):
if collector.istestclass(obj, name):
return Class.from_parent(collector, name=name, obj=obj)
elif collector.istestfunction(obj, name):
- # mock seems to store unbound methods (issue473), normalize it
+ # mock seems to store unbound methods (issue473), normalize it.
obj = getattr(obj, "__func__", obj)
# We need to try and unwrap the function if it's a functools.partial
# or a functools.wrapped.
- # We mustn't if it's been wrapped with mock.patch (python 2 only)
+ # We mustn't if it's been wrapped with mock.patch (python 2 only).
if not (inspect.isfunction(obj) or inspect.isfunction(get_real_func(obj))):
filename, lineno = getfslineno(obj)
warnings.warn_explicit(
@@ -298,14 +298,14 @@ class PyobjMixin:
self._obj = value
def _getobj(self):
- """Gets the underlying Python object. May be overwritten by subclasses."""
+ """Get the underlying Python object. May be overwritten by subclasses."""
# TODO: Improve the type of `parent` such that assert/ignore aren't needed.
assert self.parent is not None
obj = self.parent.obj # type: ignore[attr-defined]
return getattr(obj, self.name)
def getmodpath(self, stopatmodule: bool = True, includemodule: bool = False) -> str:
- """ return python path relative to the containing module. """
+ """Return Python path relative to the containing module."""
chain = self.listchain()
chain.reverse()
parts = []
@@ -346,8 +346,8 @@ class PyCollector(PyobjMixin, nodes.Collector):
return self._matches_prefix_or_glob_option("python_functions", name)
def isnosetest(self, obj: object) -> bool:
- """ Look for the __test__ attribute, which is applied by the
- @nose.tools.istest decorator
+ """Look for the __test__ attribute, which is applied by the
+ @nose.tools.istest decorator.
"""
# We explicitly check for "is True" here to not mistakenly treat
# classes with a custom __getattr__ returning something truthy (like a
@@ -360,7 +360,7 @@ class PyCollector(PyobjMixin, nodes.Collector):
def istestfunction(self, obj: object, name: str) -> bool:
if self.funcnamefilter(name) or self.isnosetest(obj):
if isinstance(obj, staticmethod):
- # static methods need to be unwrapped
+ # staticmethods need to be unwrapped.
obj = safe_getattr(obj, "__func__", False)
return (
safe_getattr(obj, "__call__", False)
@@ -373,16 +373,14 @@ class PyCollector(PyobjMixin, nodes.Collector):
return self.classnamefilter(name) or self.isnosetest(obj)
def _matches_prefix_or_glob_option(self, option_name: str, name: str) -> bool:
- """
- checks if the given name matches the prefix or glob-pattern defined
- in ini configuration.
- """
+ """Check if the given name matches the prefix or glob-pattern defined
+ in ini configuration."""
for option in self.config.getini(option_name):
if name.startswith(option):
return True
- # check that name looks like a glob-string before calling fnmatch
+ # Check that name looks like a glob-string before calling fnmatch
# because this is called for every name in each collected module,
- # and fnmatch is somewhat expensive to call
+ # and fnmatch is somewhat expensive to call.
elif ("*" in option or "?" in option or "[" in option) and fnmatch.fnmatch(
name, option
):
@@ -457,10 +455,10 @@ class PyCollector(PyobjMixin, nodes.Collector):
if not metafunc._calls:
yield Function.from_parent(self, name=name, fixtureinfo=fixtureinfo)
else:
- # add funcargs() as fixturedefs to fixtureinfo.arg2fixturedefs
+ # Add funcargs() as fixturedefs to fixtureinfo.arg2fixturedefs.
fixtures.add_funcarg_pseudo_fixture_def(self, metafunc, fm)
- # add_funcarg_pseudo_fixture_def may have shadowed some fixtures
+ # add_funcarg_pseudo_fixture_def may have shadowed some fixtures
# with direct parametrization, so make sure we update what the
# function really needs.
fixtureinfo.prune_dependency_tree()
@@ -479,7 +477,7 @@ class PyCollector(PyobjMixin, nodes.Collector):
class Module(nodes.File, PyCollector):
- """ Collector for test classes and functions. """
+ """Collector for test classes and functions."""
def _getobj(self):
return self._importtestmodule()
@@ -491,7 +489,7 @@ class Module(nodes.File, PyCollector):
return super().collect()
def _inject_setup_module_fixture(self) -> None:
- """Injects a hidden autouse, module scoped fixture into the collected module object
+ """Inject a hidden autouse, module scoped fixture into the collected module object
that invokes setUpModule/tearDownModule if either or both are available.
Using a fixture to invoke this methods ensures we play nicely and unsurprisingly with
@@ -518,7 +516,7 @@ class Module(nodes.File, PyCollector):
self.obj.__pytest_setup_module = xunit_setup_module_fixture
def _inject_setup_function_fixture(self) -> None:
- """Injects a hidden autouse, function scoped fixture into the collected module object
+ """Inject a hidden autouse, function scoped fixture into the collected module object
that invokes setup_function/teardown_function if either or both are available.
Using a fixture to invoke this methods ensures we play nicely and unsurprisingly with
@@ -547,7 +545,7 @@ class Module(nodes.File, PyCollector):
self.obj.__pytest_setup_function = xunit_setup_function_fixture
def _importtestmodule(self):
- # we assume we are only called once per module
+ # We assume we are only called once per module.
importmode = self.config.getoption("--import-mode")
try:
mod = import_path(self.fspath, mode=importmode)
@@ -604,7 +602,7 @@ class Package(Module):
session=None,
nodeid=None,
) -> None:
- # NOTE: could be just the following, but kept as-is for compat.
+ # NOTE: Could be just the following, but kept as-is for compat.
# nodes.FSCollector.__init__(self, fspath, parent=parent)
session = parent.session
nodes.FSCollector.__init__(
@@ -613,8 +611,8 @@ class Package(Module):
self.name = os.path.basename(str(fspath.dirname))
def setup(self) -> None:
- # not using fixtures to call setup_module here because autouse fixtures
- # from packages are not called automatically (#4085)
+ # Not using fixtures to call setup_module here because autouse fixtures
+ # from packages are not called automatically (#4085).
setup_module = _get_first_non_fixture_func(
self.obj, ("setUpModule", "setup_module")
)
@@ -668,7 +666,7 @@ class Package(Module):
def _call_with_optional_argument(func, arg) -> None:
"""Call the given function with the given argument if func accepts one argument, otherwise
- calls func without arguments"""
+ calls func without arguments."""
arg_count = func.__code__.co_argcount
if inspect.ismethod(func):
arg_count -= 1
@@ -680,9 +678,7 @@ def _call_with_optional_argument(func, arg) -> None:
def _get_first_non_fixture_func(obj: object, names: Iterable[str]):
"""Return the attribute from the given object to be used as a setup/teardown
- xunit-style function, but only if not marked as a fixture to
- avoid calling it twice.
- """
+ xunit-style function, but only if not marked as a fixture to avoid calling it twice."""
for name in names:
meth = getattr(obj, name, None)
if meth is not None and fixtures.getfixturemarker(meth) is None:
@@ -690,13 +686,11 @@ def _get_first_non_fixture_func(obj: object, names: Iterable[str]):
class Class(PyCollector):
- """ Collector for test methods. """
+ """Collector for test methods."""
@classmethod
def from_parent(cls, parent, *, name, obj=None):
- """
- The public constructor
- """
+ """The public constructor."""
return super().from_parent(name=name, parent=parent)
def collect(self) -> Iterable[Union[nodes.Item, nodes.Collector]]:
@@ -729,7 +723,7 @@ class Class(PyCollector):
return [Instance.from_parent(self, name="()")]
def _inject_setup_class_fixture(self) -> None:
- """Injects a hidden autouse, class scoped fixture into the collected class object
+ """Inject a hidden autouse, class scoped fixture into the collected class object
that invokes setup_class/teardown_class if either or both are available.
Using a fixture to invoke this methods ensures we play nicely and unsurprisingly with
@@ -753,7 +747,7 @@ class Class(PyCollector):
self.obj.__pytest_setup_class = xunit_setup_class_fixture
def _inject_setup_method_fixture(self) -> None:
- """Injects a hidden autouse, function scoped fixture into the collected class object
+ """Inject a hidden autouse, function scoped fixture into the collected class object
that invokes setup_method/teardown_method if either or both are available.
Using a fixture to invoke this methods ensures we play nicely and unsurprisingly with
@@ -780,9 +774,9 @@ class Class(PyCollector):
class Instance(PyCollector):
_ALLOW_MARKERS = False # hack, destroy later
- # instances share the object with their parents in a way
+ # Instances share the object with their parents in a way
# that duplicates markers instances if not taken out
- # can be removed at node structure reorganization time
+ # this can be removed at node structure reorganization time.
def _getobj(self):
# TODO: Improve the type of `parent` such that assert/ignore aren't needed.
@@ -874,8 +868,8 @@ class CallSpec2:
class Metafunc:
- """
- Metafunc objects are passed to the :func:`pytest_generate_tests <_pytest.hookspec.pytest_generate_tests>` hook.
+ """Objects passed to the :func:`pytest_generate_tests <_pytest.hookspec.pytest_generate_tests>` hook.
+
They help to inspect a test function and to generate tests according to
test configuration or values specified in the class or module where a
test function is defined.
@@ -891,19 +885,19 @@ class Metafunc:
) -> None:
self.definition = definition
- #: access to the :class:`_pytest.config.Config` object for the test session
+ #: Access to the :class:`_pytest.config.Config` object for the test session.
self.config = config
- #: the module object where the test function is defined in.
+ #: The module object where the test function is defined.
self.module = module
- #: underlying python test function
+ #: Underlying Python test function.
self.function = definition.obj
- #: set of fixture names required by the test function
+ #: Set of fixture names required by the test function.
self.fixturenames = fixtureinfo.names_closure
- #: class object where the test function is defined in or ``None``.
+ #: The class object where the test function is defined, or ``None``.
self.cls = cls
self._calls = [] # type: List[CallSpec2]
@@ -911,7 +905,7 @@ class Metafunc:
@property
def funcargnames(self) -> List[str]:
- """ alias attribute for ``fixturenames`` for pre-2.3 compatibility"""
+ """Alias attribute for ``fixturenames`` for pre-2.3 compatibility."""
warnings.warn(FUNCARGNAMES, stacklevel=2)
return self.fixturenames
@@ -930,30 +924,35 @@ class Metafunc:
*,
_param_mark: Optional[Mark] = None
) -> None:
- """ Add new invocations to the underlying test function using the list
+ """Add new invocations to the underlying test function using the list
of argvalues for the given argnames. Parametrization is performed
during the collection phase. If you need to setup expensive resources
see about setting indirect to do it rather at test setup time.
- :arg argnames: a comma-separated string denoting one or more argument
- names, or a list/tuple of argument strings.
+ :param argnames:
+ A comma-separated string denoting one or more argument names, or
+ a list/tuple of argument strings.
+
+ :param argvalues:
+ The list of argvalues determines how often a test is invoked with
+ different argument values.
- :arg argvalues: The list of argvalues determines how often a
- test is invoked with different argument values. If only one
- argname was specified argvalues is a list of values. If N
- argnames were specified, argvalues must be a list of N-tuples,
- where each tuple-element specifies a value for its respective
- argname.
+ If only one argname was specified, argvalues is a list of values.
+ If N argnames were specified, argvalues must be a list of
+ N-tuples, where each tuple-element specifies a value for its
+ respective argname.
- :arg indirect: The list of argnames or boolean. A list of arguments'
- names (subset of argnames). If True the list contains all names from
- the argnames. Each argvalue corresponding to an argname in this list will
+ :param indirect:
+ A list of arguments' names (subset of argnames) or a boolean.
+ If True, the list contains all names from the argnames. Each
+ argvalue corresponding to an argname in this list will
be passed as request.param to its respective argname fixture
function so that it can perform more expensive setups during the
setup phase of a test rather than at collection time.
- :arg ids: sequence of (or generator for) ids for ``argvalues``,
- or a callable to return part of the id for each argvalue.
+ :param ids:
+ Sequence of (or generator for) ids for ``argvalues``,
+ or a callable to return part of the id for each argvalue.
With sequences (and generators like ``itertools.count()``) the
returned ids should be of type ``string``, ``int``, ``float``,
@@ -971,7 +970,8 @@ class Metafunc:
If no ids are provided they will be generated automatically from
the argvalues.
- :arg scope: if specified it denotes the scope of the parameters.
+ :param scope:
+ If specified, it denotes the scope of the parameters.
The scope is used for grouping tests by parameter instances.
It will also override any fixture-function defined scope, allowing
to set a dynamic scope using test context or configuration.
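The hook-based use of ``parametrize`` described above might look like the following sketch; ``db_backend`` is a made-up fixture name used only for illustration:

    import pytest

    @pytest.fixture
    def db_backend(request):
        # request.param is set because indirect=True is used below.
        return request.param

    def pytest_generate_tests(metafunc):
        if "db_backend" in metafunc.fixturenames:
            metafunc.parametrize(
                "db_backend", ["sqlite", "postgres"], ids=["lite", "pg"], indirect=True
            )

    def test_connect(db_backend):
        assert db_backend in ("sqlite", "postgres")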
@@ -1018,9 +1018,9 @@ class Metafunc:
scope, descr="parametrize() call in {}".format(self.function.__name__)
)
- # create the new calls: if we are parametrize() multiple times (by applying the decorator
+ # Create the new calls: if parametrize() is called multiple times (by applying the decorator
# more than once) then we accumulate those calls generating the cartesian product
- # of all calls
+ # of all calls.
newcalls = []
for callspec in self._calls or [CallSpec2(self)]:
for param_index, (param_id, param_set) in enumerate(zip(ids, parameters)):
@@ -1049,15 +1049,15 @@ class Metafunc:
parameters: typing.Sequence[ParameterSet],
nodeid: str,
) -> List[str]:
- """Resolves the actual ids for the given argnames, based on the ``ids`` parameter given
+ """Resolve the actual ids for the given argnames, based on the ``ids`` parameter given
to ``parametrize``.
- :param List[str] argnames: list of argument names passed to ``parametrize()``.
- :param ids: the ids parameter of the parametrized call (see docs).
- :param List[ParameterSet] parameters: the list of parameter values, same size as ``argnames``.
- :param str str: the nodeid of the item that generated this parametrized call.
+ :param List[str] argnames: List of argument names passed to ``parametrize()``.
+ :param ids: The ids parameter of the parametrized call (see docs).
+ :param List[ParameterSet] parameters: The list of parameter values, same size as ``argnames``.
+ :param str nodeid: The nodeid of the item that generated this parametrized call.
:rtype: List[str]
- :return: the list of ids for each argname given
+ :returns: The list of ids for each argname given.
"""
if ids is None:
idfn = None
@@ -1109,11 +1109,12 @@ class Metafunc:
argnames: typing.Sequence[str],
indirect: Union[bool, typing.Sequence[str]],
) -> Dict[str, "Literal['params', 'funcargs']"]:
- """Resolves if each parametrized argument must be considered a parameter to a fixture or a "funcarg"
- to the function, based on the ``indirect`` parameter of the parametrized() call.
+ """Resolve if each parametrized argument must be considered a
+ parameter to a fixture or a "funcarg" to the function, based on the
+ ``indirect`` parameter of the parametrized() call.
- :param List[str] argnames: list of argument names passed to ``parametrize()``.
- :param indirect: same ``indirect`` parameter of ``parametrize()``.
+ :param List[str] argnames: List of argument names passed to ``parametrize()``.
+ :param indirect: Same as the ``indirect`` parameter of ``parametrize()``.
:rtype: Dict[str, str]
A dict mapping each arg name to either:
* "params" if the argname should be the parameter of a fixture of the same name.
@@ -1148,12 +1149,11 @@ class Metafunc:
argnames: typing.Sequence[str],
indirect: Union[bool, typing.Sequence[str]],
) -> None:
- """
- Check if all argnames are being used, by default values, or directly/indirectly.
+ """Check if all argnames are being used, by default values, or directly/indirectly.
- :param List[str] argnames: list of argument names passed to ``parametrize()``.
- :param indirect: same ``indirect`` parameter of ``parametrize()``.
- :raise ValueError: if validation fails.
+ :param List[str] argnames: List of argument names passed to ``parametrize()``.
+ :param indirect: Same as the ``indirect`` parameter of ``parametrize()``.
+ :raises ValueError: If validation fails.
"""
default_arg_names = set(get_default_arg_names(self.function))
func_name = self.function.__name__
@@ -1204,7 +1204,7 @@ def _find_parametrized_scope(
if name in argnames
]
if used_scopes:
- # Takes the most narrow scope from used fixtures
+ # Takes the most narrow scope from used fixtures.
for scope in reversed(fixtures.scopes):
if scope in used_scopes:
return scope
@@ -1259,7 +1259,7 @@ def _idval(
elif isinstance(val, enum.Enum):
return str(val)
elif isinstance(getattr(val, "__name__", None), str):
- # name of a class, function, module, etc.
+ # Name of a class, function, module, etc.
name = getattr(val, "__name__") # type: str
return name
return str(argname) + str(idx)
@@ -1306,13 +1306,13 @@ def idmaker(
unique_ids = set(resolved_ids)
if len(unique_ids) != len(resolved_ids):
- # Record the number of occurrences of each test ID
+ # Record the number of occurrences of each test ID.
test_id_counts = Counter(resolved_ids)
- # Map the test ID to its next suffix
+ # Map the test ID to its next suffix.
test_id_suffixes = defaultdict(int) # type: Dict[str, int]
- # Suffix non-unique IDs to make them unique
+ # Suffix non-unique IDs to make them unique.
for index, test_id in enumerate(resolved_ids):
if test_id_counts[test_id] > 1:
resolved_ids[index] = "{}{}".format(test_id, test_id_suffixes[test_id])
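The suffixing behaviour above can be observed directly: with duplicate user-provided ids, pytest appends 0, 1, 2, and so on:

    import pytest

    @pytest.mark.parametrize("value", [1, 1.0, True], ids=["one", "one", "one"])
    def test_truthy(value):
        assert value

    # Collected as: test_truthy[one0], test_truthy[one1], test_truthy[one2]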
@@ -1365,12 +1365,12 @@ def _show_fixtures_per_test(config: Config, session: Session) -> None:
tw.sep("-", "fixtures used by {}".format(item.name))
# TODO: Fix this type ignore.
tw.sep("-", "({})".format(get_best_relpath(item.function))) # type: ignore[attr-defined]
- # dict key not used in loop but needed for sorting
+ # dict key not used in loop but needed for sorting.
for _, fixturedefs in sorted(info.name2fixturedefs.items()):
assert fixturedefs is not None
if not fixturedefs:
continue
- # last item is expected to be the one used by the test item
+ # Last item is expected to be the one used by the test item.
write_fixture(fixturedefs[-1])
for session_item in session.items:
@@ -1446,11 +1446,35 @@ def write_docstring(tw: TerminalWriter, doc: str, indent: str = " ") -> None:
class Function(PyobjMixin, nodes.Item):
- """ a Function Item is responsible for setting up and executing a
- Python test function.
+ """An Item responsible for setting up and executing a Python test function.
+
+ :param name:
+ The full function name, including any decorations like those
+ added by parametrization (``my_func[my_param]``).
+ :param parent:
+ The parent Node.
+ :param config:
+ The pytest Config object.
+ :param callspec:
+ If given, this function has been parametrized and the callspec contains
+ meta information about the parametrization.
+ :param callobj:
+ If given, the object which will be called when the Function is invoked,
+ otherwise the callobj will be obtained from ``parent`` using ``originalname``.
+ :param keywords:
+ Keywords bound to the function object for "-k" matching.
+ :param session:
+ The pytest Session object.
+ :param fixtureinfo:
+ Fixture information already resolved at this fixture node.
+ :param originalname:
+ The attribute name to use for accessing the underlying function object.
+ Defaults to ``name``. Set this if name is different from the original name,
+ for example when it contains decorations like those added by parametrization
+ (``my_func[my_param]``).
"""
- # disable since functions handle it themselves
+ # Disable since functions handle it themselves.
_ALLOW_MARKERS = False
def __init__(
@@ -1465,24 +1489,6 @@ class Function(PyobjMixin, nodes.Item):
fixtureinfo: Optional[FuncFixtureInfo] = None,
originalname: Optional[str] = None,
) -> None:
- """
- param name: the full function name, including any decorations like those
- added by parametrization (``my_func[my_param]``).
- param parent: the parent Node.
- param config: the pytest Config object
- param callspec: if given, this is function has been parametrized and the callspec contains
- meta information about the parametrization.
- param callobj: if given, the object which will be called when the Function is invoked,
- otherwise the callobj will be obtained from ``parent`` using ``originalname``
- param keywords: keywords bound to the function object for "-k" matching.
- param session: the pytest Session object
- param fixtureinfo: fixture information already resolved at this fixture node.
- param originalname:
- The attribute name to use for accessing the underlying function object.
- Defaults to ``name``. Set this if name is different from the original name,
- for example when it contains decorations like those added by parametrization
- (``my_func[my_param]``).
- """
super().__init__(name, parent, config=config, session=session)
if callobj is not NOTSET:
@@ -1496,8 +1502,8 @@ class Function(PyobjMixin, nodes.Item):
#: .. versionadded:: 3.0
self.originalname = originalname or name
- # note: when FunctionDefinition is introduced, we should change ``originalname``
- # to a readonly property that returns FunctionDefinition.name
+ # Note: when FunctionDefinition is introduced, we should change ``originalname``
+ # to a readonly property that returns FunctionDefinition.name.
self.keywords.update(self.obj.__dict__)
self.own_markers.extend(get_unpacked_marks(self.obj))
@@ -1535,9 +1541,7 @@ class Function(PyobjMixin, nodes.Item):
@classmethod
def from_parent(cls, parent, **kw): # todo: determine sound type limitations
- """
- The public constructor
- """
+ """The public constructor."""
return super().from_parent(parent=parent, **kw)
def _initrequest(self) -> None:
@@ -1546,7 +1550,7 @@ class Function(PyobjMixin, nodes.Item):
@property
def function(self):
- "underlying python 'function' object"
+ """Underlying python 'function' object."""
return getimfunc(self.obj)
def _getobj(self):
@@ -1555,17 +1559,17 @@ class Function(PyobjMixin, nodes.Item):
@property
def _pyfuncitem(self):
- "(compatonly) for code expecting pytest-2.2 style request objects"
+ """(compatonly) for code expecting pytest-2.2 style request objects."""
return self
@property
def funcargnames(self) -> List[str]:
- """ alias attribute for ``fixturenames`` for pre-2.3 compatibility"""
+ """Alias attribute for ``fixturenames`` for pre-2.3 compatibility."""
warnings.warn(FUNCARGNAMES, stacklevel=2)
return self.fixturenames
def runtest(self) -> None:
- """ execute the underlying test function. """
+ """Execute the underlying test function."""
self.ihook.pytest_pyfunc_call(pyfuncitem=self)
def setup(self) -> None:
@@ -1589,7 +1593,7 @@ class Function(PyobjMixin, nodes.Item):
excinfo.traceback = ntraceback.filter()
# issue364: mark all but first and last frames to
- # only show a single-line message for each frame
+ # only show a single-line message for each frame.
if self.config.getoption("tbstyle", "auto") == "auto":
if len(excinfo.traceback) > 2:
for entry in excinfo.traceback[1:-1]:
@@ -1606,10 +1610,8 @@ class Function(PyobjMixin, nodes.Item):
class FunctionDefinition(Function):
- """
- internal hack until we get actual definition nodes instead of the
- crappy metafunc hack
- """
+ """Internal hack until we get actual definition nodes instead of the
+ crappy metafunc hack."""
def runtest(self) -> None:
raise RuntimeError("function definitions are not supposed to be used")
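For readers of the ``name``/``originalname`` wording above: parametrization is what produces decorated ids such as ``my_func[my_param]``, while ``originalname`` keeps the undecorated function name. A minimal sketch, with invented test and parameter names::

    import pytest

    # Collected as "test_upper[hello]" and "test_upper[world]";
    # Function.originalname remains "test_upper" for both items.
    @pytest.mark.parametrize("word", ["hello", "world"])
    def test_upper(word):
        assert word.upper().isupper()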
diff --git a/src/_pytest/python_api.py b/src/_pytest/python_api.py
index fb6c76852..1bad5c777 100644
--- a/src/_pytest/python_api.py
+++ b/src/_pytest/python_api.py
@@ -39,10 +39,8 @@ def _non_numeric_type_error(value, at: Optional[str]) -> TypeError:
class ApproxBase:
- """
- Provide shared utilities for making approximate comparisons between numbers
- or sequences of numbers.
- """
+ """Provide shared utilities for making approximate comparisons between
+ numbers or sequences of numbers."""
# Tell numpy to use our `__eq__` operator instead of its.
__array_ufunc__ = None
@@ -74,16 +72,14 @@ class ApproxBase:
return ApproxScalar(x, rel=self.rel, abs=self.abs, nan_ok=self.nan_ok)
def _yield_comparisons(self, actual):
- """
- Yield all the pairs of numbers to be compared. This is used to
- implement the `__eq__` method.
+ """Yield all the pairs of numbers to be compared.
+
+ This is used to implement the `__eq__` method.
"""
raise NotImplementedError
def _check_type(self) -> None:
- """
- Raise a TypeError if the expected value is not a valid type.
- """
+ """Raise a TypeError if the expected value is not a valid type."""
# This is only a concern if the expected value is a sequence. In every
# other case, the approx() function ensures that the expected value has
# a numeric type. For this reason, the default is to do nothing. The
@@ -100,9 +96,7 @@ def _recursive_list_map(f, x):
class ApproxNumpy(ApproxBase):
- """
- Perform approximate comparisons where the expected value is numpy array.
- """
+    """Perform approximate comparisons where the expected value is a numpy array."""
def __repr__(self) -> str:
list_scalars = _recursive_list_map(self._approx_scalar, self.expected.tolist())
@@ -111,7 +105,7 @@ class ApproxNumpy(ApproxBase):
def __eq__(self, actual) -> bool:
import numpy as np
- # self.expected is supposed to always be an array here
+ # self.expected is supposed to always be an array here.
if not np.isscalar(actual):
try:
@@ -142,10 +136,8 @@ class ApproxNumpy(ApproxBase):
class ApproxMapping(ApproxBase):
- """
- Perform approximate comparisons where the expected value is a mapping with
- numeric values (the keys can be anything).
- """
+ """Perform approximate comparisons where the expected value is a mapping
+ with numeric values (the keys can be anything)."""
def __repr__(self) -> str:
return "approx({!r})".format(
@@ -173,10 +165,7 @@ class ApproxMapping(ApproxBase):
class ApproxSequencelike(ApproxBase):
- """
- Perform approximate comparisons where the expected value is a sequence of
- numbers.
- """
+ """Perform approximate comparisons where the expected value is a sequence of numbers."""
def __repr__(self) -> str:
seq_type = type(self.expected)
@@ -207,9 +196,7 @@ class ApproxSequencelike(ApproxBase):
class ApproxScalar(ApproxBase):
- """
- Perform approximate comparisons where the expected value is a single number.
- """
+ """Perform approximate comparisons where the expected value is a single number."""
# Using Real should be better than this Union, but not possible yet:
# https://github.com/python/typeshed/pull/3108
@@ -217,13 +204,14 @@ class ApproxScalar(ApproxBase):
DEFAULT_RELATIVE_TOLERANCE = 1e-6 # type: Union[float, Decimal]
def __repr__(self) -> str:
- """
- Return a string communicating both the expected value and the tolerance
- for the comparison being made, e.g. '1.0 ± 1e-6', '(3+4j) ± 5e-6 ∠ ±180°'.
+ """Return a string communicating both the expected value and the
+ tolerance for the comparison being made.
+
+ For example, ``1.0 ± 1e-6``, ``(3+4j) ± 5e-6 ∠ ±180°``.
"""
# Infinities aren't compared using tolerances, so don't show a
- # tolerance. Need to call abs to handle complex numbers, e.g. (inf + 1j)
+ # tolerance. Need to call abs to handle complex numbers, e.g. (inf + 1j).
if math.isinf(abs(self.expected)):
return str(self.expected)
@@ -239,10 +227,8 @@ class ApproxScalar(ApproxBase):
return "{} ± {}".format(self.expected, vetted_tolerance)
def __eq__(self, actual) -> bool:
- """
- Return true if the given value is equal to the expected value within
- the pre-specified tolerance.
- """
+ """Return whether the given value is equal to the expected value
+ within the pre-specified tolerance."""
if _is_numpy_array(actual):
# Call ``__eq__()`` manually to prevent infinite-recursion with
# numpy<1.13. See #3748.
@@ -276,10 +262,10 @@ class ApproxScalar(ApproxBase):
@property
def tolerance(self):
- """
- Return the tolerance for the comparison. This could be either an
- absolute tolerance or a relative tolerance, depending on what the user
- specified or which would be larger.
+ """Return the tolerance for the comparison.
+
+ This could be either an absolute tolerance or a relative tolerance,
+ depending on what the user specified or which would be larger.
"""
def set_default(x, default):
@@ -323,17 +309,14 @@ class ApproxScalar(ApproxBase):
class ApproxDecimal(ApproxScalar):
- """
- Perform approximate comparisons where the expected value is a decimal.
- """
+ """Perform approximate comparisons where the expected value is a Decimal."""
DEFAULT_ABSOLUTE_TOLERANCE = Decimal("1e-12")
DEFAULT_RELATIVE_TOLERANCE = Decimal("1e-6")
def approx(expected, rel=None, abs=None, nan_ok: bool = False) -> ApproxBase:
- """
- Assert that two numbers (or two sets of numbers) are equal to each other
+ """Assert that two numbers (or two sets of numbers) are equal to each other
within some tolerance.
Due to the `intricacies of floating-point arithmetic`__, numbers that we
@@ -522,9 +505,9 @@ def approx(expected, rel=None, abs=None, nan_ok: bool = False) -> ApproxBase:
def _is_numpy_array(obj: object) -> bool:
- """
- Return true if the given object is a numpy array. Make a special effort to
- avoid importing numpy unless it's really necessary.
+ """Return true if the given object is a numpy array.
+
+ A special effort is made to avoid importing numpy unless it's really necessary.
"""
import sys
@@ -563,11 +546,11 @@ def raises( # noqa: F811
*args: Any,
**kwargs: Any
) -> Union["RaisesContext[_E]", _pytest._code.ExceptionInfo[_E]]:
- r"""
- Assert that a code block/function call raises ``expected_exception``
+ r"""Assert that a code block/function call raises ``expected_exception``
or raise a failure exception otherwise.
- :kwparam match: if specified, a string containing a regular expression,
+ :kwparam match:
+ If specified, a string containing a regular expression,
or a regular expression object, that is tested against the string
representation of the exception using ``re.search``. To match a literal
string that may contain `special characters`__, the pattern can
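A short usage sketch of the ``approx`` and ``raises`` behaviour documented in this file; the numbers, message, and test name are illustrative only::

    import re

    import pytest

    def test_approx_and_raises():
        # Default relative tolerance is 1e-6.
        assert 0.1 + 0.2 == pytest.approx(0.3)
        # With only abs= given, the absolute tolerance alone is used.
        assert {"a": 1.0001} == pytest.approx({"a": 1.0}, abs=1e-3)

        # match= is applied with re.search, so escape literal special characters.
        with pytest.raises(ValueError, match=re.escape("bad value (42)")):
            raise ValueError("bad value (42)")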
diff --git a/src/_pytest/recwarn.py b/src/_pytest/recwarn.py
index 11ca571aa..ded414ab4 100644
--- a/src/_pytest/recwarn.py
+++ b/src/_pytest/recwarn.py
@@ -1,4 +1,4 @@
-""" recording warnings during test function execution. """
+"""Record warnings during test function execution."""
import re
import warnings
from types import TracebackType
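The recording helpers provided by this module are typically used either as a context manager or through the ``recwarn`` fixture; the warning messages below are invented::

    import warnings

    import pytest

    def test_warns_context():
        with pytest.warns(DeprecationWarning, match="old_api"):
            warnings.warn("old_api is deprecated", DeprecationWarning)

    def test_recwarn_fixture(recwarn):
        warnings.warn("something minor", UserWarning)
        assert len(recwarn) == 1
        assert recwarn.pop(UserWarning).category is UserWarning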
diff --git a/src/_pytest/reports.py b/src/_pytest/reports.py
index cbd9ae183..65098343b 100644
--- a/src/_pytest/reports.py
+++ b/src/_pytest/reports.py
@@ -94,9 +94,8 @@ class BaseReport:
@property
def longreprtext(self) -> str:
- """
- Read-only property that returns the full string representation
- of ``longrepr``.
+ """Read-only property that returns the full string representation of
+ ``longrepr``.
.. versionadded:: 3.0
"""
@@ -109,7 +108,7 @@ class BaseReport:
@property
def caplog(self) -> str:
- """Return captured log lines, if log capturing is enabled
+ """Return captured log lines, if log capturing is enabled.
.. versionadded:: 3.5
"""
@@ -119,7 +118,7 @@ class BaseReport:
@property
def capstdout(self) -> str:
- """Return captured text from stdout, if capturing is enabled
+ """Return captured text from stdout, if capturing is enabled.
.. versionadded:: 3.0
"""
@@ -129,7 +128,7 @@ class BaseReport:
@property
def capstderr(self) -> str:
- """Return captured text from stderr, if capturing is enabled
+ """Return captured text from stderr, if capturing is enabled.
.. versionadded:: 3.0
"""
@@ -147,11 +146,8 @@ class BaseReport:
@property
def count_towards_summary(self) -> bool:
- """
- **Experimental**
-
- ``True`` if this report should be counted towards the totals shown at the end of the
- test session: "1 passed, 1 failure, etc".
+ """**Experimental** Whether this report should be counted towards the
+ totals shown at the end of the test session: "1 passed, 1 failure, etc".
.. note::
@@ -162,11 +158,9 @@ class BaseReport:
@property
def head_line(self) -> Optional[str]:
- """
- **Experimental**
-
- Returns the head line shown with longrepr output for this report, more commonly during
- traceback representation during failures::
+        """**Experimental** The head line shown with longrepr output for this
+        report, most commonly during the traceback representation of
+        failures::
________ Test.foo ________
@@ -190,11 +184,10 @@ class BaseReport:
return verbose
def _to_json(self) -> Dict[str, Any]:
- """
- This was originally the serialize_report() function from xdist (ca03269).
+ """Return the contents of this report as a dict of builtin entries,
+ suitable for serialization.
- Returns the contents of this report as a dict of builtin entries, suitable for
- serialization.
+ This was originally the serialize_report() function from xdist (ca03269).
Experimental method.
"""
@@ -202,11 +195,11 @@ class BaseReport:
@classmethod
def _from_json(cls: "Type[_R]", reportdict: Dict[str, object]) -> _R:
- """
- This was originally the serialize_report() function from xdist (ca03269).
+ """Create either a TestReport or CollectReport, depending on the calling class.
+
+        It is the caller's responsibility to know which class to pass here.
- Factory method that returns either a TestReport or CollectReport, depending on the calling
- class. It's the callers responsibility to know which class to pass here.
+ This was originally the serialize_report() function from xdist (ca03269).
Experimental method.
"""
@@ -229,9 +222,8 @@ def _report_unserialization_failure(
class TestReport(BaseReport):
- """ Basic test report object (also used for setup and teardown calls if
- they fail).
- """
+ """Basic test report object (also used for setup and teardown calls if
+ they fail)."""
__test__ = False
@@ -248,38 +240,38 @@ class TestReport(BaseReport):
user_properties: Optional[Iterable[Tuple[str, object]]] = None,
**extra
) -> None:
- #: normalized collection node id
+ #: Normalized collection nodeid.
self.nodeid = nodeid
- #: a (filesystempath, lineno, domaininfo) tuple indicating the
+ #: A (filesystempath, lineno, domaininfo) tuple indicating the
#: actual location of a test item - it might be different from the
#: collected one e.g. if a method is inherited from a different module.
self.location = location # type: Tuple[str, Optional[int], str]
- #: a name -> value dictionary containing all keywords and
+ #: A name -> value dictionary containing all keywords and
#: markers associated with a test invocation.
self.keywords = keywords
- #: test outcome, always one of "passed", "failed", "skipped".
+ #: Test outcome, always one of "passed", "failed", "skipped".
self.outcome = outcome
#: None or a failure representation.
self.longrepr = longrepr
- #: one of 'setup', 'call', 'teardown' to indicate runtest phase.
+ #: One of 'setup', 'call', 'teardown' to indicate runtest phase.
self.when = when
- #: user properties is a list of tuples (name, value) that holds user
- #: defined properties of the test
+ #: User properties is a list of tuples (name, value) that holds user
+ #: defined properties of the test.
self.user_properties = list(user_properties or [])
- #: list of pairs ``(str, str)`` of extra information which needs to
+ #: List of pairs ``(str, str)`` of extra information which needs to
#: marshallable. Used by pytest to add captured text
#: from ``stdout`` and ``stderr``, but may be used by other plugins
#: to add arbitrary information to reports.
self.sections = list(sections)
- #: time it took to run just the test
+ #: Time it took to run just the test.
self.duration = duration
self.__dict__.update(extra)
@@ -291,9 +283,7 @@ class TestReport(BaseReport):
@classmethod
def from_item_and_call(cls, item: Item, call: "CallInfo[None]") -> "TestReport":
- """
- Factory method to create and fill a TestReport with standard item and call info.
- """
+ """Create and fill a TestReport with standard item and call info."""
when = call.when
# Remove "collect" from the Literal type -- only for collection calls.
assert when != "collect"
@@ -350,10 +340,10 @@ class CollectReport(BaseReport):
sections: Iterable[Tuple[str, str]] = (),
**extra
) -> None:
- #: normalized collection node id
+ #: Normalized collection nodeid.
self.nodeid = nodeid
- #: test outcome, always one of "passed", "failed", "skipped".
+ #: Test outcome, always one of "passed", "failed", "skipped".
self.outcome = outcome
#: None or a failure representation.
@@ -362,10 +352,11 @@ class CollectReport(BaseReport):
#: The collected items and collection nodes.
self.result = result or []
- #: list of pairs ``(str, str)`` of extra information which needs to
- #: marshallable. Used by pytest to add captured text
- #: from ``stdout`` and ``stderr``, but may be used by other plugins
- #: to add arbitrary information to reports.
+        #: List of pairs ``(str, str)`` of extra information which needs to
+        #: be marshallable.
+        #: Used by pytest to add captured text from ``stdout`` and ``stderr``,
+        #: but may be used by other plugins to add arbitrary information to
+        #: reports.
self.sections = list(sections)
self.__dict__.update(extra)
@@ -413,11 +404,10 @@ def pytest_report_from_serializable(
def _report_to_json(report: BaseReport) -> Dict[str, Any]:
- """
- This was originally the serialize_report() function from xdist (ca03269).
+ """Return the contents of this report as a dict of builtin entries,
+ suitable for serialization.
- Returns the contents of this report as a dict of builtin entries, suitable for
- serialization.
+ This was originally the serialize_report() function from xdist (ca03269).
"""
def serialize_repr_entry(
@@ -485,10 +475,10 @@ def _report_to_json(report: BaseReport) -> Dict[str, Any]:
def _report_kwargs_from_json(reportdict: Dict[str, Any]) -> Dict[str, Any]:
- """
- This was originally the serialize_report() function from xdist (ca03269).
+ """Return **kwargs that can be used to construct a TestReport or
+ CollectReport instance.
- Returns **kwargs that can be used to construct a TestReport or CollectReport instance.
+ This was originally the serialize_report() function from xdist (ca03269).
"""
def deserialize_repr_entry(entry_data):
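As a sketch of how the report properties touched above are commonly consumed from a plugin hook (the conftest.py placement and the print calls are illustrative)::

    # conftest.py
    def pytest_runtest_logreport(report):
        # Convenience properties documented on BaseReport/TestReport.
        if report.when == "call" and report.failed:
            print(report.nodeid, "took", report.duration)
            if report.capstdout:
                print("captured stdout:", report.capstdout)
            if report.longreprtext:
                print(report.longreprtext.splitlines()[0])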
diff --git a/src/_pytest/resultlog.py b/src/_pytest/resultlog.py
index cd6824abf..356a39c12 100644
--- a/src/_pytest/resultlog.py
+++ b/src/_pytest/resultlog.py
@@ -1,6 +1,4 @@
-""" log machine-parseable test session result information in a plain
-text file.
-"""
+"""log machine-parseable test session result information to a plain text file."""
import os
import py
@@ -30,7 +28,7 @@ def pytest_addoption(parser: Parser) -> None:
def pytest_configure(config: Config) -> None:
resultlog = config.option.resultlog
- # prevent opening resultlog on worker nodes (xdist)
+ # Prevent opening resultlog on worker nodes (xdist).
if resultlog and not hasattr(config, "workerinput"):
dirname = os.path.dirname(os.path.abspath(resultlog))
if not os.path.isdir(dirname):
diff --git a/src/_pytest/runner.py b/src/_pytest/runner.py
index 69754ad5e..289d676d6 100644
--- a/src/_pytest/runner.py
+++ b/src/_pytest/runner.py
@@ -1,4 +1,4 @@
-""" basic collect and runtest protocol implementations """
+"""Basic collect and runtest protocol implementations."""
import bdb
import os
import sys
@@ -39,7 +39,7 @@ if TYPE_CHECKING:
from _pytest.terminal import TerminalReporter
#
-# pytest plugin hooks
+# pytest plugin hooks.
def pytest_addoption(parser: Parser) -> None:
@@ -116,8 +116,8 @@ def runtestprotocol(
if not item.config.getoption("setuponly", False):
reports.append(call_and_report(item, "call", log))
reports.append(call_and_report(item, "teardown", log, nextitem=nextitem))
- # after all teardown hooks have been called
- # want funcargs and request info to go away
+    # After all teardown hooks have been called,
+    # we want funcargs and request info to go away.
if hasrequest:
item._request = False # type: ignore[attr-defined]
item.funcargs = None # type: ignore[attr-defined]
@@ -170,8 +170,7 @@ def pytest_runtest_teardown(item: Item, nextitem: Optional[Item]) -> None:
def _update_current_test_var(
item: Item, when: Optional["Literal['setup', 'call', 'teardown']"]
) -> None:
- """
- Update :envvar:`PYTEST_CURRENT_TEST` to reflect the current item and stage.
+ """Update :envvar:`PYTEST_CURRENT_TEST` to reflect the current item and stage.
If ``when`` is None, delete ``PYTEST_CURRENT_TEST`` from the environment.
"""
@@ -253,15 +252,21 @@ _T = TypeVar("_T")
@attr.s(repr=False)
class CallInfo(Generic[_T]):
- """ Result/Exception info a function invocation.
-
- :param T result: The return value of the call, if it didn't raise. Can only be accessed
- if excinfo is None.
- :param Optional[ExceptionInfo] excinfo: The captured exception of the call, if it raised.
- :param float start: The system time when the call started, in seconds since the epoch.
- :param float stop: The system time when the call ended, in seconds since the epoch.
- :param float duration: The call duration, in seconds.
- :param str when: The context of invocation: "setup", "call", "teardown", ...
+    """Result/Exception info of a function invocation.
+
+ :param T result:
+ The return value of the call, if it didn't raise. Can only be
+ accessed if excinfo is None.
+ :param Optional[ExceptionInfo] excinfo:
+ The captured exception of the call, if it raised.
+ :param float start:
+ The system time when the call started, in seconds since the epoch.
+ :param float stop:
+ The system time when the call ended, in seconds since the epoch.
+ :param float duration:
+ The call duration, in seconds.
+ :param str when:
+ The context of invocation: "setup", "call", "teardown", ...
"""
_result = attr.ib(type="Optional[_T]")
@@ -352,14 +357,14 @@ def pytest_make_collect_report(collector: Collector) -> CollectReport:
class SetupState:
- """ shared state for setting up/tearing down test items or collectors. """
+ """Shared state for setting up/tearing down test items or collectors."""
def __init__(self):
self.stack = [] # type: List[Node]
self._finalizers = {} # type: Dict[Node, List[Callable[[], object]]]
def addfinalizer(self, finalizer: Callable[[], object], colitem) -> None:
- """ attach a finalizer to the given colitem. """
+ """Attach a finalizer to the given colitem."""
assert colitem and not isinstance(colitem, tuple)
assert callable(finalizer)
# assert colitem in self.stack # some unit tests don't setup stack :/
@@ -419,7 +424,7 @@ class SetupState:
def prepare(self, colitem) -> None:
"""Setup objects along the collector chain to the test-method."""
- # check if the last collection node has raised an error
+ # Check if the last collection node has raised an error.
for col in self.stack:
if hasattr(col, "_prepare_exc"):
exc = col._prepare_exc # type: ignore[attr-defined]
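The ``CallInfo`` fields described above are usually inspected from a ``pytest_runtest_makereport`` hook wrapper, roughly like this (the conftest.py placement and the print call are illustrative)::

    # conftest.py
    import pytest

    @pytest.hookimpl(hookwrapper=True)
    def pytest_runtest_makereport(item, call):
        outcome = yield
        report = outcome.get_result()
        # call.when is "setup", "call" or "teardown"; excinfo is None unless it raised.
        if call.when == "call" and call.excinfo is not None:
            print("{} ({}) raised {!r} after {:.3f}s".format(
                item.nodeid, report.outcome, call.excinfo.value, call.duration))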
diff --git a/src/_pytest/skipping.py b/src/_pytest/skipping.py
index e333e78df..c5b4ff39e 100644
--- a/src/_pytest/skipping.py
+++ b/src/_pytest/skipping.py
@@ -1,4 +1,4 @@
-""" support for skip/xfail functions and markers. """
+"""Support for skip/xfail functions and markers."""
import os
import platform
import sys
@@ -298,9 +298,9 @@ def pytest_runtest_makereport(item: Item, call: CallInfo[None]):
and rep.skipped
and type(rep.longrepr) is tuple
):
- # skipped by mark.skipif; change the location of the failure
+ # Skipped by mark.skipif; change the location of the failure
# to point to the item definition, otherwise it will display
- # the location of where the skip exception was raised within pytest
+ # the location of where the skip exception was raised within pytest.
_, _, reason = rep.longrepr
filename, line = item.reportinfo()[:2]
assert line is not None
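The markers handled by this plugin are applied in test code roughly as follows; the platform check and reasons are made-up examples::

    import sys

    import pytest

    @pytest.mark.skipif(sys.platform == "win32", reason="POSIX-only behaviour")
    def test_posix_only():
        assert True

    @pytest.mark.xfail(raises=NotImplementedError, reason="feature not done yet")
    def test_not_implemented():
        raise NotImplementedError("still missing")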
diff --git a/src/_pytest/store.py b/src/_pytest/store.py
index 2b46c4389..fbf3c588f 100644
--- a/src/_pytest/store.py
+++ b/src/_pytest/store.py
@@ -92,7 +92,7 @@ class Store:
def __getitem__(self, key: StoreKey[T]) -> T:
"""Get the value for key.
- Raises KeyError if the key wasn't set before.
+ Raises ``KeyError`` if the key wasn't set before.
"""
return cast(T, self._store[key])
@@ -116,10 +116,10 @@ class Store:
def __delitem__(self, key: StoreKey[T]) -> None:
"""Delete the value for key.
- Raises KeyError if the key wasn't set before.
+ Raises ``KeyError`` if the key wasn't set before.
"""
del self._store[key]
def __contains__(self, key: StoreKey[T]) -> bool:
- """Returns whether key was set."""
+ """Return whether key was set."""
return key in self._store
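For orientation, a minimal sketch of how this key/value container is used (``_pytest.store`` is internal API, so the import is for illustration only)::

    from _pytest.store import Store, StoreKey

    some_key = StoreKey[int]()  # keys compare by identity and carry a type

    store = Store()
    store[some_key] = 42
    assert some_key in store
    assert store[some_key] == 42
    del store[some_key]  # deleting again would raise KeyError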
diff --git a/src/_pytest/terminal.py b/src/_pytest/terminal.py
index cbca9ba46..86c327226 100644
--- a/src/_pytest/terminal.py
+++ b/src/_pytest/terminal.py
@@ -1,4 +1,4 @@
-""" terminal reporting of the full testing process.
+"""Terminal reporting of the full testing process.
This is a good source for looking at the various reporting hooks.
"""
@@ -69,11 +69,10 @@ _REPORTCHARS_DEFAULT = "fE"
class MoreQuietAction(argparse.Action):
- """
- a modified copy of the argparse count action which counts down and updates
- the legacy quiet attribute at the same time
+ """A modified copy of the argparse count action which counts down and updates
+ the legacy quiet attribute at the same time.
- used to unify verbosity handling
+ Used to unify verbosity handling.
"""
def __init__(
@@ -276,13 +275,14 @@ def pytest_report_teststatus(report: BaseReport) -> Tuple[str, str, str]:
@attr.s
class WarningReport:
- """
- Simple structure to hold warnings information captured by ``pytest_warning_recorded``.
+ """Simple structure to hold warnings information captured by ``pytest_warning_recorded``.
- :ivar str message: user friendly message about the warning
- :ivar str|None nodeid: node id that generated the warning (see ``get_location``).
+ :ivar str message:
+ User friendly message about the warning.
+ :ivar str|None nodeid:
+        Node id that generated the warning (see ``get_location``).
:ivar tuple|py.path.local fslocation:
- file system location of the source of the warning (see ``get_location``).
+ File system location of the source of the warning (see ``get_location``).
"""
message = attr.ib(type=str)
@@ -293,10 +293,7 @@ class WarningReport:
count_towards_summary = True
def get_location(self, config: Config) -> Optional[str]:
- """
- Returns the more user-friendly information about the location
- of a warning, or None.
- """
+        """Return user-friendly information about the location of a warning, or None."""
if self.nodeid:
return self.nodeid
if self.fslocation:
@@ -349,7 +346,7 @@ class TerminalReporter:
self._tw = value
def _determine_show_progress_info(self) -> "Literal['progress', 'count', False]":
- """Return True if we should display progress information based on the current config"""
+ """Return whether we should display progress information based on the current config."""
# do not show progress if we are not capturing output (#3038)
if self.config.getoption("capture", "no") == "no":
return False
@@ -439,10 +436,10 @@ class TerminalReporter:
self._tw.line(line, **markup)
def rewrite(self, line: str, **markup: bool) -> None:
- """
- Rewinds the terminal cursor to the beginning and writes the given line.
+        """Rewind the terminal cursor to the beginning and write the given line.
- :kwarg erase: if True, will also add spaces until the full terminal width to ensure
+ :param erase:
+ If True, will also add spaces until the full terminal width to ensure
previous lines are properly erased.
The rest of the keyword arguments are markup instructions.
@@ -499,9 +496,9 @@ class TerminalReporter:
def pytest_plugin_registered(self, plugin: _PluggyPlugin) -> None:
if self.config.option.traceconfig:
msg = "PLUGIN registered: {}".format(plugin)
- # XXX this event may happen during setup/teardown time
+ # XXX This event may happen during setup/teardown time
# which unfortunately captures our output here
- # which garbles our output if we use self.write_line
+ # which garbles our output if we use self.write_line.
self.write_line(msg)
def pytest_deselected(self, items: Sequence[Item]) -> None:
@@ -510,8 +507,8 @@ class TerminalReporter:
def pytest_runtest_logstart(
self, nodeid: str, location: Tuple[str, Optional[int], str]
) -> None:
- # ensure that the path is printed before the
- # 1st test of a module starts running
+ # Ensure that the path is printed before the
+ # 1st test of a module starts running.
if self.showlongtestinfo:
line = self._locationline(nodeid, *location)
self.write_ensure_prefix(line, "")
@@ -533,7 +530,7 @@ class TerminalReporter:
markup = None
self._add_stats(category, [rep])
if not letter and not word:
- # probably passed setup/teardown
+ # Probably passed setup/teardown.
return
running_xdist = hasattr(rep, "node")
if markup is None:
@@ -623,7 +620,7 @@ class TerminalReporter:
@property
def _width_of_current_line(self) -> int:
- """Return the width of current line, using the superior implementation of py-1.6 when available"""
+ """Return the width of the current line."""
return self._tw.width_of_current_line
def pytest_collection(self) -> None:
@@ -761,9 +758,9 @@ class TerminalReporter:
rep.toterminal(self._tw)
def _printcollecteditems(self, items: Sequence[Item]) -> None:
- # to print out items and their parent collectors
+ # To print out items and their parent collectors
# we take care to leave out Instances aka ()
- # because later versions are going to get rid of them anyway
+ # because later versions are going to get rid of them anyway.
if self.config.option.verbose < 0:
if self.config.option.verbose < -1:
counts = {} # type: Dict[str, int]
@@ -868,7 +865,7 @@ class TerminalReporter:
line += "[".join(values)
return line
- # collect_fspath comes from testid which has a "/"-normalized path
+ # collect_fspath comes from testid which has a "/"-normalized path.
if fspath:
res = mkrel(nodeid)
@@ -896,7 +893,7 @@ class TerminalReporter:
return ""
#
- # summaries for sessionfinish
+ # Summaries for sessionfinish.
#
def getreports(self, name: str):
values = []
@@ -1255,9 +1252,9 @@ def _folded_skips(
# For consistency, report all fspaths in relative form.
fspath = startdir.bestrelpath(py.path.local(fspath))
keywords = getattr(event, "keywords", {})
- # folding reports with global pytestmark variable
- # this is workaround, because for now we cannot identify the scope of a skip marker
- # TODO: revisit after marks scope would be fixed
+ # Folding reports with global pytestmark variable.
+        # This is a workaround, because for now we cannot identify the scope of a skip marker.
+        # TODO: Revisit once the scope of marks is fixed.
if (
event.when == "setup"
and "skip" in keywords
@@ -1298,20 +1295,19 @@ def _make_plural(count: int, noun: str) -> Tuple[int, str]:
def _plugin_nameversions(plugininfo) -> List[str]:
values = [] # type: List[str]
for plugin, dist in plugininfo:
- # gets us name and version!
+ # Gets us name and version!
name = "{dist.project_name}-{dist.version}".format(dist=dist)
- # questionable convenience, but it keeps things short
+ # Questionable convenience, but it keeps things short.
if name.startswith("pytest-"):
name = name[7:]
- # we decided to print python package names
- # they can have more than one plugin
+        # We decided to print Python package names; they can have more than one plugin.
if name not in values:
values.append(name)
return values
def format_session_duration(seconds: float) -> str:
- """Format the given seconds in a human readable manner to show in the final summary"""
+ """Format the given seconds in a human readable manner to show in the final summary."""
if seconds < 60:
return "{:.2f}s".format(seconds)
else:
diff --git a/src/_pytest/timing.py b/src/_pytest/timing.py
index ded917b35..62442de75 100644
--- a/src/_pytest/timing.py
+++ b/src/_pytest/timing.py
@@ -1,5 +1,4 @@
-"""
-Indirection for time functions.
+"""Indirection for time functions.
We intentionally grab some "time" functions internally to avoid tests mocking "time" to affect
pytest runtime information (issue #185).
diff --git a/src/_pytest/tmpdir.py b/src/_pytest/tmpdir.py
index 58dd65908..017577a7a 100644
--- a/src/_pytest/tmpdir.py
+++ b/src/_pytest/tmpdir.py
@@ -1,4 +1,4 @@
-""" support for providing temporary directories to test functions. """
+"""Support for providing temporary directories to test functions."""
import os
import re
import tempfile
@@ -22,13 +22,14 @@ from _pytest.monkeypatch import MonkeyPatch
class TempPathFactory:
"""Factory for temporary directories under the common base temp directory.
- The base directory can be configured using the ``--basetemp`` option."""
+ The base directory can be configured using the ``--basetemp`` option.
+ """
_given_basetemp = attr.ib(
type=Path,
- # using os.path.abspath() to get absolute path instead of resolve() as it
- # does not work the same in all platforms (see #4427)
- # Path.absolute() exists, but it is not public (see https://bugs.python.org/issue25012)
+ # Use os.path.abspath() to get absolute path instead of resolve() as it
+ # does not work the same in all platforms (see #4427).
+ # Path.absolute() exists, but it is not public (see https://bugs.python.org/issue25012).
# Ignore type because of https://github.com/python/mypy/issues/6172.
converter=attr.converters.optional(
lambda p: Path(os.path.abspath(str(p))) # type: ignore
@@ -38,10 +39,8 @@ class TempPathFactory:
_basetemp = attr.ib(type=Optional[Path], default=None)
@classmethod
- def from_config(cls, config) -> "TempPathFactory":
- """
- :param config: a pytest configuration
- """
+ def from_config(cls, config: Config) -> "TempPathFactory":
+ """Create a factory according to pytest configuration."""
return cls(
given_basetemp=config.option.basetemp, trace=config.trace.get("tmpdir")
)
@@ -55,7 +54,7 @@ class TempPathFactory:
return basename
def mktemp(self, basename: str, numbered: bool = True) -> Path:
- """Creates a new temporary directory managed by the factory.
+ """Create a new temporary directory managed by the factory.
:param basename:
Directory base name, must be a relative path.
@@ -66,7 +65,7 @@ class TempPathFactory:
means that this function will create directories named ``"foo-0"``,
``"foo-1"``, ``"foo-2"`` and so on.
- :return:
+ :returns:
The path to the new directory.
"""
basename = self._ensure_relative_to_basetemp(basename)
@@ -79,7 +78,7 @@ class TempPathFactory:
return p
def getbasetemp(self) -> Path:
- """ return base temporary directory. """
+ """Return base temporary directory."""
if self._basetemp is not None:
return self._basetemp
@@ -106,28 +105,23 @@ class TempPathFactory:
@attr.s
class TempdirFactory:
- """
- backward comptibility wrapper that implements
- :class:``py.path.local`` for :class:``TempPathFactory``
- """
+    """Backward compatibility wrapper that implements :class:`py.path.local`
+    for :class:`TempPathFactory`."""
_tmppath_factory = attr.ib(type=TempPathFactory)
def mktemp(self, basename: str, numbered: bool = True) -> py.path.local:
- """
- Same as :meth:`TempPathFactory.mkdir`, but returns a ``py.path.local`` object.
- """
+        """Same as :meth:`TempPathFactory.mktemp`, but returns a ``py.path.local`` object."""
return py.path.local(self._tmppath_factory.mktemp(basename, numbered).resolve())
def getbasetemp(self) -> py.path.local:
- """backward compat wrapper for ``_tmppath_factory.getbasetemp``"""
+ """Backward compat wrapper for ``_tmppath_factory.getbasetemp``."""
return py.path.local(self._tmppath_factory.getbasetemp().resolve())
def get_user() -> Optional[str]:
"""Return the current user name, or None if getuser() does not work
- in the current environment (see #1010).
- """
+ in the current environment (see #1010)."""
import getpass
try:
@@ -153,16 +147,14 @@ def pytest_configure(config: Config) -> None:
@pytest.fixture(scope="session")
def tmpdir_factory(request: FixtureRequest) -> TempdirFactory:
- """Return a :class:`_pytest.tmpdir.TempdirFactory` instance for the test session.
- """
+ """Return a :class:`_pytest.tmpdir.TempdirFactory` instance for the test session."""
# Set dynamically by pytest_configure() above.
return request.config._tmpdirhandler # type: ignore
@pytest.fixture(scope="session")
def tmp_path_factory(request: FixtureRequest) -> TempPathFactory:
- """Return a :class:`_pytest.tmpdir.TempPathFactory` instance for the test session.
- """
+ """Return a :class:`_pytest.tmpdir.TempPathFactory` instance for the test session."""
# Set dynamically by pytest_configure() above.
return request.config._tmp_path_factory # type: ignore
@@ -177,11 +169,11 @@ def _mk_tmp(request: FixtureRequest, factory: TempPathFactory) -> Path:
@pytest.fixture
def tmpdir(tmp_path: Path) -> py.path.local:
- """Return a temporary directory path object
- which is unique to each test function invocation,
- created as a sub directory of the base temporary
- directory. The returned object is a `py.path.local`_
- path object.
+ """Return a temporary directory path object which is unique to each test
+ function invocation, created as a sub directory of the base temporary
+ directory.
+
+ The returned object is a `py.path.local`_ path object.
.. _`py.path.local`: https://py.readthedocs.io/en/latest/path.html
"""
@@ -190,15 +182,15 @@ def tmpdir(tmp_path: Path) -> py.path.local:
@pytest.fixture
def tmp_path(request: FixtureRequest, tmp_path_factory: TempPathFactory) -> Path:
- """Return a temporary directory path object
- which is unique to each test function invocation,
- created as a sub directory of the base temporary
- directory. The returned object is a :class:`pathlib.Path`
- object.
+ """Return a temporary directory path object which is unique to each test
+ function invocation, created as a sub directory of the base temporary
+ directory.
+
+ The returned object is a :class:`pathlib.Path` object.
.. note::
- in python < 3.6 this is a pathlib2.Path
+        In Python < 3.6 this is a pathlib2.Path.
"""
return _mk_tmp(request, tmp_path_factory)
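The two fixtures documented above are used directly in test functions; the file and directory names below are invented::

    def test_write_config(tmp_path):
        cfg = tmp_path / "app.ini"  # pathlib.Path, unique per test invocation
        cfg.write_text("[app]\nverbose = 1\n")
        assert cfg.read_text().startswith("[app]")

    def test_shared_dir(tmp_path_factory):
        shared = tmp_path_factory.mktemp("shared")  # e.g. .../shared0
        assert shared.is_dir()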
diff --git a/src/_pytest/unittest.py b/src/_pytest/unittest.py
index 782a5c369..75c53a552 100644
--- a/src/_pytest/unittest.py
+++ b/src/_pytest/unittest.py
@@ -1,4 +1,4 @@
-""" discovery and running of std-library "unittest" style tests. """
+"""Discover and run std-library "unittest" style tests."""
import sys
import traceback
import types
@@ -46,7 +46,7 @@ if TYPE_CHECKING:
def pytest_pycollect_makeitem(
collector: PyCollector, name: str, obj: object
) -> Optional["UnitTestCase"]:
- # has unittest been imported and is obj a subclass of its TestCase?
+ # Has unittest been imported and is obj a subclass of its TestCase?
try:
ut = sys.modules["unittest"]
# Type ignored because `ut` is an opaque module.
@@ -54,14 +54,14 @@ def pytest_pycollect_makeitem(
return None
except Exception:
return None
- # yes, so let's collect it
+ # Yes, so let's collect it.
item = UnitTestCase.from_parent(collector, name=name, obj=obj) # type: UnitTestCase
return item
class UnitTestCase(Class):
- # marker for fixturemanger.getfixtureinfo()
- # to declare that our children do not support funcargs
+    # Marker for fixturemanager.getfixtureinfo()
+    # to declare that our children do not support funcargs.
nofuncargs = True
def collect(self) -> Iterable[Union[Item, Collector]]:
@@ -97,7 +97,7 @@ class UnitTestCase(Class):
def _inject_setup_teardown_fixtures(self, cls: type) -> None:
"""Injects a hidden auto-use fixture to invoke setUpClass/setup_method and corresponding
- teardown functions (#517)"""
+ teardown functions (#517)."""
class_fixture = _make_xunit_fixture(
cls, "setUpClass", "tearDownClass", scope="class", pass_self=False
)
@@ -145,7 +145,7 @@ class TestCaseFunction(Function):
_testcase = None # type: Optional[unittest.TestCase]
def setup(self) -> None:
- # a bound method to be called during teardown() if set (see 'runtest()')
+ # A bound method to be called during teardown() if set (see 'runtest()').
self._explicit_tearDown = None # type: Optional[Callable[[], None]]
assert self.parent is not None
self._testcase = self.parent.obj(self.name) # type: ignore[attr-defined]
@@ -164,12 +164,12 @@ class TestCaseFunction(Function):
pass
def _addexcinfo(self, rawexcinfo: "_SysExcInfoType") -> None:
- # unwrap potential exception info (see twisted trial support below)
+ # Unwrap potential exception info (see twisted trial support below).
rawexcinfo = getattr(rawexcinfo, "_rawexcinfo", rawexcinfo)
try:
excinfo = _pytest._code.ExceptionInfo(rawexcinfo) # type: ignore[arg-type]
- # invoke the attributes to trigger storing the traceback
- # trial causes some issue there
+            # Invoke the attributes to trigger storing the traceback;
+            # trial causes some issue there.
excinfo.value
excinfo.traceback
except TypeError:
@@ -242,7 +242,7 @@ class TestCaseFunction(Function):
def _expecting_failure(self, test_method) -> bool:
"""Return True if the given unittest method (or the entire class) is marked
- with @expectedFailure"""
+ with @expectedFailure."""
expecting_failure_method = getattr(
test_method, "__unittest_expecting_failure__", False
)
@@ -256,23 +256,23 @@ class TestCaseFunction(Function):
maybe_wrap_pytest_function_for_tracing(self)
- # let the unittest framework handle async functions
+ # Let the unittest framework handle async functions.
if is_async_function(self.obj):
# Type ignored because self acts as the TestResult, but is not actually one.
self._testcase(result=self) # type: ignore[arg-type]
else:
- # when --pdb is given, we want to postpone calling tearDown() otherwise
+ # When --pdb is given, we want to postpone calling tearDown() otherwise
# when entering the pdb prompt, tearDown() would have probably cleaned up
- # instance variables, which makes it difficult to debug
- # arguably we could always postpone tearDown(), but this changes the moment where the
+ # instance variables, which makes it difficult to debug.
+ # Arguably we could always postpone tearDown(), but this changes the moment where the
# TestCase instance interacts with the results object, so better to only do it
- # when absolutely needed
+ # when absolutely needed.
if self.config.getoption("usepdb") and not _is_skipped(self.obj):
self._explicit_tearDown = self._testcase.tearDown
setattr(self._testcase, "tearDown", lambda *args: None)
- # we need to update the actual bound method with self.obj, because
- # wrap_pytest_function_for_tracing replaces self.obj by a wrapper
+ # We need to update the actual bound method with self.obj, because
+ # wrap_pytest_function_for_tracing replaces self.obj by a wrapper.
setattr(self._testcase, self.name, self.obj)
try:
self._testcase(result=self) # type: ignore[arg-type]
@@ -305,14 +305,14 @@ def pytest_runtest_makereport(item: Item, call: CallInfo[None]) -> None:
and isinstance(call.excinfo.value, unittest.SkipTest) # type: ignore[attr-defined]
):
excinfo = call.excinfo
- # let's substitute the excinfo with a pytest.skip one
+ # Let's substitute the excinfo with a pytest.skip one.
call2 = CallInfo[None].from_call(
lambda: pytest.skip(str(excinfo.value)), call.when
)
call.excinfo = call2.excinfo
-# twisted trial support
+# Twisted trial support.
@hookimpl(hookwrapper=True)
@@ -356,5 +356,5 @@ def check_testcase_implements_trial_reporter(done: List[int] = []) -> None:
def _is_skipped(obj) -> bool:
- """Return True if the given object has been marked with @unittest.skip"""
+ """Return True if the given object has been marked with @unittest.skip."""
return bool(getattr(obj, "__unittest_skip__", False))
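A plain stdlib ``unittest`` class of the kind this plugin collects, exercising the setUpClass injection, skip detection and ``expectedFailure`` handling discussed above (class and test names are invented)::

    import unittest

    class TestLegacy(unittest.TestCase):
        @classmethod
        def setUpClass(cls):
            cls.payload = {"answer": 42}

        @unittest.skip("not ported yet")
        def test_skipped(self):
            self.fail("never runs")

        @unittest.expectedFailure
        def test_known_bug(self):
            self.assertEqual(self.payload["answer"], 41)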
diff --git a/src/_pytest/warning_types.py b/src/_pytest/warning_types.py
index 6f3b88da8..c93b96049 100644
--- a/src/_pytest/warning_types.py
+++ b/src/_pytest/warning_types.py
@@ -99,7 +99,7 @@ class UnformattedWarning(Generic[_W]):
template = attr.ib(type=str)
def format(self, **kwargs: Any) -> _W:
- """Returns an instance of the warning category, formatted with given kwargs"""
+ """Return an instance of the warning category, formatted with given kwargs."""
return self.category(self.template.format(**kwargs))
diff --git a/src/_pytest/warnings.py b/src/_pytest/warnings.py
index 3a8f2d8b3..0604aa60b 100644
--- a/src/_pytest/warnings.py
+++ b/src/_pytest/warnings.py
@@ -87,8 +87,7 @@ def catch_warnings_for_item(
when: "Literal['config', 'collect', 'runtest']",
item: Optional[Item],
) -> Generator[None, None, None]:
- """
- Context manager that catches warnings generated in the contained execution block.
+ """Context manager that catches warnings generated in the contained execution block.
``item`` can be None if we are not in the context of an item execution.
@@ -101,14 +100,14 @@ def catch_warnings_for_item(
assert log is not None
if not sys.warnoptions:
- # if user is not explicitly configuring warning filters, show deprecation warnings by default (#2908)
+ # If user is not explicitly configuring warning filters, show deprecation warnings by default (#2908).
warnings.filterwarnings("always", category=DeprecationWarning)
warnings.filterwarnings("always", category=PendingDeprecationWarning)
warnings.filterwarnings("error", category=pytest.PytestDeprecationWarning)
- # filters should have this precedence: mark, cmdline options, ini
- # filters should be applied in the inverse order of precedence
+ # Filters should have this precedence: mark, cmdline options, ini.
+ # Filters should be applied in the inverse order of precedence.
for arg in inifilters:
warnings.filterwarnings(*_parse_filter(arg, escape=False))
@@ -193,14 +192,16 @@ def pytest_sessionfinish(session: Session) -> Generator[None, None, None]:
def _issue_warning_captured(warning: Warning, hook, stacklevel: int) -> None:
- """
- This function should be used instead of calling ``warnings.warn`` directly when we are in the "configure" stage:
- at this point the actual options might not have been set, so we manually trigger the pytest_warning_recorded
- hook so we can display these warnings in the terminal. This is a hack until we can sort out #2891.
+ """A function that should be used instead of calling ``warnings.warn``
+ directly when we are in the "configure" stage.
+
+ At this point the actual options might not have been set, so we manually
+ trigger the pytest_warning_recorded hook so we can display these warnings
+ in the terminal. This is a hack until we can sort out #2891.
- :param warning: the warning instance.
- :param hook: the hook caller
- :param stacklevel: stacklevel forwarded to warnings.warn
+ :param warning: The warning instance.
+ :param hook: The hook caller.
+    :param stacklevel: The stacklevel forwarded to ``warnings.warn``.
"""
with warnings.catch_warnings(record=True) as records:
warnings.simplefilter("always", type(warning))
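The mark > command line > ini precedence noted above looks roughly like this in practice; the ini content and warning text are invented, and the ini file is shown as comments::

    # pytest.ini (lowest precedence):
    #   [pytest]
    #   filterwarnings =
    #       error
    #       ignore::DeprecationWarning

    import warnings

    import pytest

    @pytest.mark.filterwarnings("ignore::UserWarning")  # mark overrides ini/cmdline
    def test_noisy_helper():
        warnings.warn("chatty but harmless", UserWarning)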
diff --git a/src/pytest/__init__.py b/src/pytest/__init__.py
index 64d6d1f23..c4c281918 100644
--- a/src/pytest/__init__.py
+++ b/src/pytest/__init__.py
@@ -1,7 +1,5 @@
# PYTHON_ARGCOMPLETE_OK
-"""
-pytest: unit and functional testing with Python.
-"""
+"""pytest: unit and functional testing with Python."""
from . import collect
from _pytest import __version__
from _pytest.assertion import register_assert_rewrite
diff --git a/src/pytest/__main__.py b/src/pytest/__main__.py
index 25b1e45b8..b17015293 100644
--- a/src/pytest/__main__.py
+++ b/src/pytest/__main__.py
@@ -1,6 +1,4 @@
-"""
-pytest entry point
-"""
+"""The pytest entry point."""
import pytest
if __name__ == "__main__":
diff --git a/testing/test_tmpdir.py b/testing/test_tmpdir.py
index 26a34c656..cc03385f3 100644
--- a/testing/test_tmpdir.py
+++ b/testing/test_tmpdir.py
@@ -2,12 +2,14 @@ import os
import stat
import sys
from typing import Callable
+from typing import cast
from typing import List
import attr
import pytest
from _pytest import pathlib
+from _pytest.config import Config
from _pytest.pathlib import cleanup_numbered_dir
from _pytest.pathlib import create_cleanup_lock
from _pytest.pathlib import make_numbered_dir
@@ -45,7 +47,7 @@ class FakeConfig:
class TestTempdirHandler:
def test_mktemp(self, tmp_path):
- config = FakeConfig(tmp_path)
+ config = cast(Config, FakeConfig(tmp_path))
t = TempdirFactory(TempPathFactory.from_config(config))
tmp = t.mktemp("world")
assert tmp.relto(t.getbasetemp()) == "world0"
@@ -58,7 +60,7 @@ class TestTempdirHandler:
def test_tmppath_relative_basetemp_absolute(self, tmp_path, monkeypatch):
"""#4425"""
monkeypatch.chdir(tmp_path)
- config = FakeConfig("hello")
+ config = cast(Config, FakeConfig("hello"))
t = TempPathFactory.from_config(config)
assert t.getbasetemp().resolve() == (tmp_path / "hello").resolve()