Diffstat (limited to 'testing')
-rw-r--r--  testing/acceptance_test.py  82
-rw-r--r--  testing/code/test_code.py  57
-rw-r--r--  testing/code/test_excinfo.py  120
-rw-r--r--  testing/code/test_source.py  232
-rw-r--r--  testing/conftest.py  5
-rw-r--r--  testing/deprecated_test.py  31
-rw-r--r--  testing/io/test_saferepr.py  79
-rw-r--r--  testing/logging/test_fixture.py  2
-rw-r--r--  testing/logging/test_formatter.py  68
-rw-r--r--  testing/logging/test_reporting.py  16
-rw-r--r--  testing/python/approx.py  109
-rw-r--r--  testing/python/collect.py  4
-rw-r--r--  testing/python/fixtures.py  14
-rw-r--r--  testing/python/metafunc.py  25
-rw-r--r--  testing/python/raises.py  2
-rw-r--r--  testing/python/show_fixtures_per_test.py  4
-rw-r--r--  testing/test_assertion.py  148
-rw-r--r--  testing/test_assertrewrite.py  152
-rw-r--r--  testing/test_cacheprovider.py  249
-rw-r--r--  testing/test_capture.py  53
-rw-r--r--  testing/test_collection.py  43
-rw-r--r--  testing/test_compat.py  21
-rw-r--r--  testing/test_config.py  13
-rw-r--r--  testing/test_conftest.py  6
-rw-r--r--  testing/test_doctest.py  17
-rw-r--r--  testing/test_faulthandler.py  4
-rw-r--r--  testing/test_junitxml.py  33
-rw-r--r--  testing/test_mark.py  25
-rw-r--r--  testing/test_meta.py  1
-rw-r--r--  testing/test_pdb.py  36
-rw-r--r--  testing/test_pluginmanager.py  30
-rw-r--r--  testing/test_pytester.py  126
-rw-r--r--  testing/test_reports.py  6
-rw-r--r--  testing/test_runner.py  30
-rw-r--r--  testing/test_runner_xunit.py  4
-rw-r--r--  testing/test_session.py  18
-rw-r--r--  testing/test_setuponly.py (renamed from testing/python/setup_only.py)  2
-rw-r--r--  testing/test_setupplan.py (renamed from testing/python/setup_plan.py)  0
-rw-r--r--  testing/test_skipping.py  26
-rw-r--r--  testing/test_stepwise.py  2
-rw-r--r--  testing/test_terminal.py  259
-rw-r--r--  testing/test_tmpdir.py  14
-rw-r--r--  testing/test_unittest.py  38
-rw-r--r--  testing/test_warnings.py  13
44 files changed, 1461 insertions, 758 deletions
diff --git a/testing/acceptance_test.py b/testing/acceptance_test.py
index 284796a42..8f7be14be 100644
--- a/testing/acceptance_test.py
+++ b/testing/acceptance_test.py
@@ -178,8 +178,14 @@ class TestGeneralUsage:
p1 = testdir.makepyfile("")
p2 = testdir.makefile(".pyc", "123")
result = testdir.runpytest(p1, p2)
- assert result.ret
- result.stderr.fnmatch_lines(["*ERROR: not found:*{}".format(p2.basename)])
+ assert result.ret == ExitCode.USAGE_ERROR
+ result.stderr.fnmatch_lines(
+ [
+ "ERROR: not found: {}".format(p2),
+ "(no name {!r} in any of [[][]])".format(str(p2)),
+ "",
+ ]
+ )
@pytest.mark.filterwarnings("default")
def test_better_reporting_on_conftest_load_failure(self, testdir, request):
@@ -246,7 +252,7 @@ class TestGeneralUsage:
)
result = testdir.runpytest()
assert result.ret == ExitCode.NO_TESTS_COLLECTED
- assert "should not be seen" not in result.stdout.str()
+ result.stdout.no_fnmatch_line("*should not be seen*")
assert "stderr42" not in result.stderr.str()
def test_conftest_printing_shows_if_error(self, testdir):
@@ -628,7 +634,7 @@ class TestInvocationVariants:
result = testdir.runpytest("--pyargs", "tpkg.test_hello", syspathinsert=True)
assert result.ret != 0
- result.stdout.fnmatch_lines(["collected*0*items*/*1*errors"])
+ result.stdout.fnmatch_lines(["collected*0*items*/*1*error"])
def test_pyargs_only_imported_once(self, testdir):
pkg = testdir.mkpydir("foo")
@@ -858,16 +864,21 @@ class TestInvocationVariants:
4
""",
)
- result = testdir.runpytest("-rf")
- lines = result.stdout.str().splitlines()
- for line in lines:
- if line.startswith(("FAIL ", "FAILED ")):
- _fail, _sep, testid = line.partition(" ")
- break
- result = testdir.runpytest(testid, "-rf")
- result.stdout.fnmatch_lines(
- ["FAILED test_doctest_id.txt::test_doctest_id.txt", "*1 failed*"]
- )
+ testid = "test_doctest_id.txt::test_doctest_id.txt"
+ expected_lines = [
+ "*= FAILURES =*",
+ "*_ ?doctest? test_doctest_id.txt _*",
+ "FAILED test_doctest_id.txt::test_doctest_id.txt",
+ "*= 1 failed in*",
+ ]
+ result = testdir.runpytest(testid, "-rf", "--tb=short")
+ result.stdout.fnmatch_lines(expected_lines)
+
+ # Ensure that re-running it will still handle it as a
+ # doctest.DocTestFailure, which was not the case before, when doctest
+ # was re-imported without creating a new RUNNER_CLASS.
+ result = testdir.runpytest(testid, "-rf", "--tb=short")
+ result.stdout.fnmatch_lines(expected_lines)
def test_core_backward_compatibility(self):
"""Test backward compatibility for get_plugin_manager function. See #787."""
@@ -950,10 +961,10 @@ class TestDurations:
testdir.makepyfile(test_collecterror="""xyz""")
result = testdir.runpytest("--durations=2", "-k test_1")
assert result.ret == 2
- result.stdout.fnmatch_lines(["*Interrupted: 1 errors during collection*"])
+ result.stdout.fnmatch_lines(["*Interrupted: 1 error during collection*"])
# Collection errors abort test execution, therefore no duration is
# output
- assert "duration" not in result.stdout.str()
+ result.stdout.no_fnmatch_line("*duration*")
def test_with_not(self, testdir):
testdir.makepyfile(self.source)
@@ -1007,7 +1018,7 @@ def test_zipimport_hook(testdir, tmpdir):
result = testdir.runpython(target)
assert result.ret == 0
result.stderr.fnmatch_lines(["*not found*foo*"])
- assert "INTERNALERROR>" not in result.stdout.str()
+ result.stdout.no_fnmatch_line("*INTERNALERROR>*")
def test_import_plugin_unicode_name(testdir):
@@ -1237,3 +1248,40 @@ def test_warn_on_async_gen_function(testdir):
assert (
result.stdout.str().count("async def functions are not natively supported") == 1
)
+
+
+def test_pdb_can_be_rewritten(testdir):
+ testdir.makepyfile(
+ **{
+ "conftest.py": """
+ import pytest
+ pytest.register_assert_rewrite("pdb")
+ """,
+ "__init__.py": "",
+ "pdb.py": """
+ def check():
+ assert 1 == 2
+ """,
+ "test_pdb.py": """
+ def test():
+ import pdb
+ assert pdb.check()
+ """,
+ }
+ )
+ # Disable debugging plugin itself to avoid:
+ # > INTERNALERROR> AttributeError: module 'pdb' has no attribute 'set_trace'
+ result = testdir.runpytest_subprocess("-p", "no:debugging", "-vv")
+ result.stdout.fnmatch_lines(
+ [
+ " def check():",
+ "> assert 1 == 2",
+ "E assert 1 == 2",
+ "E -1",
+ "E +2",
+ "",
+ "pdb.py:2: AssertionError",
+ "*= 1 failed in *",
+ ]
+ )
+ assert result.ret == 1
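A recurring change in this file (and throughout the commit) replaces assertions of the form assert "..." not in result.stdout.str() with the new LineMatcher.no_fnmatch_line() helper, which quotes the offending line on failure instead of reporting a bare substring mismatch; a regex twin, no_re_match_line(), appears later in the logging tests. A minimal sketch of the pattern, using a hypothetical test built on pytest's testdir fixture:

    def test_no_internal_error(testdir):
        testdir.makepyfile("def test_ok(): pass")
        result = testdir.runpytest()
        # Fails, quoting the matching line, if any stdout line matches the
        # fnmatch-style pattern; a plain substring assert gives no context.
        result.stdout.no_fnmatch_line("*INTERNALERROR*")
        assert result.ret == 0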
diff --git a/testing/code/test_code.py b/testing/code/test_code.py
index 2f55720b4..f8e1ce17f 100644
--- a/testing/code/test_code.py
+++ b/testing/code/test_code.py
@@ -1,18 +1,19 @@
import sys
+from types import FrameType
from unittest import mock
import _pytest._code
import pytest
-def test_ne():
+def test_ne() -> None:
code1 = _pytest._code.Code(compile('foo = "bar"', "", "exec"))
assert code1 == code1
code2 = _pytest._code.Code(compile('foo = "baz"', "", "exec"))
assert code2 != code1
-def test_code_gives_back_name_for_not_existing_file():
+def test_code_gives_back_name_for_not_existing_file() -> None:
name = "abc-123"
co_code = compile("pass\n", name, "exec")
assert co_code.co_filename == name
@@ -21,68 +22,67 @@ def test_code_gives_back_name_for_not_existing_file():
assert code.fullsource is None
-def test_code_with_class():
+def test_code_with_class() -> None:
class A:
pass
pytest.raises(TypeError, _pytest._code.Code, A)
-def x():
+def x() -> None:
raise NotImplementedError()
-def test_code_fullsource():
+def test_code_fullsource() -> None:
code = _pytest._code.Code(x)
full = code.fullsource
assert "test_code_fullsource()" in str(full)
-def test_code_source():
+def test_code_source() -> None:
code = _pytest._code.Code(x)
src = code.source()
- expected = """def x():
+ expected = """def x() -> None:
raise NotImplementedError()"""
assert str(src) == expected
-def test_frame_getsourcelineno_myself():
- def func():
+def test_frame_getsourcelineno_myself() -> None:
+ def func() -> FrameType:
return sys._getframe(0)
- f = func()
- f = _pytest._code.Frame(f)
+ f = _pytest._code.Frame(func())
source, lineno = f.code.fullsource, f.lineno
+ assert source is not None
assert source[lineno].startswith(" return sys._getframe(0)")
-def test_getstatement_empty_fullsource():
- def func():
+def test_getstatement_empty_fullsource() -> None:
+ def func() -> FrameType:
return sys._getframe(0)
- f = func()
- f = _pytest._code.Frame(f)
+ f = _pytest._code.Frame(func())
with mock.patch.object(f.code.__class__, "fullsource", None):
assert f.statement == ""
-def test_code_from_func():
+def test_code_from_func() -> None:
co = _pytest._code.Code(test_frame_getsourcelineno_myself)
assert co.firstlineno
assert co.path
-def test_unicode_handling():
+def test_unicode_handling() -> None:
value = "ąć".encode()
- def f():
+ def f() -> None:
raise Exception(value)
excinfo = pytest.raises(Exception, f)
str(excinfo)
-def test_code_getargs():
+def test_code_getargs() -> None:
def f1(x):
raise NotImplementedError()
@@ -108,26 +108,26 @@ def test_code_getargs():
assert c4.getargs(var=True) == ("x", "y", "z")
-def test_frame_getargs():
- def f1(x):
+def test_frame_getargs() -> None:
+ def f1(x) -> FrameType:
return sys._getframe(0)
fr1 = _pytest._code.Frame(f1("a"))
assert fr1.getargs(var=True) == [("x", "a")]
- def f2(x, *y):
+ def f2(x, *y) -> FrameType:
return sys._getframe(0)
fr2 = _pytest._code.Frame(f2("a", "b", "c"))
assert fr2.getargs(var=True) == [("x", "a"), ("y", ("b", "c"))]
- def f3(x, **z):
+ def f3(x, **z) -> FrameType:
return sys._getframe(0)
fr3 = _pytest._code.Frame(f3("a", b="c"))
assert fr3.getargs(var=True) == [("x", "a"), ("z", {"b": "c"})]
- def f4(x, *y, **z):
+ def f4(x, *y, **z) -> FrameType:
return sys._getframe(0)
fr4 = _pytest._code.Frame(f4("a", "b", c="d"))
@@ -135,7 +135,7 @@ def test_frame_getargs():
class TestExceptionInfo:
- def test_bad_getsource(self):
+ def test_bad_getsource(self) -> None:
try:
if False:
pass
@@ -145,13 +145,13 @@ class TestExceptionInfo:
exci = _pytest._code.ExceptionInfo.from_current()
assert exci.getrepr()
- def test_from_current_with_missing(self):
+ def test_from_current_with_missing(self) -> None:
with pytest.raises(AssertionError, match="no current exception"):
_pytest._code.ExceptionInfo.from_current()
class TestTracebackEntry:
- def test_getsource(self):
+ def test_getsource(self) -> None:
try:
if False:
pass
@@ -161,12 +161,13 @@ class TestTracebackEntry:
exci = _pytest._code.ExceptionInfo.from_current()
entry = exci.traceback[0]
source = entry.getsource()
+ assert source is not None
assert len(source) == 6
assert "assert False" in source[5]
class TestReprFuncArgs:
- def test_not_raise_exception_with_mixed_encoding(self, tw_mock):
+ def test_not_raise_exception_with_mixed_encoding(self, tw_mock) -> None:
from _pytest._code.code import ReprFuncArgs
args = [("unicode_string", "São Paulo"), ("utf8_string", b"S\xc3\xa3o Paulo")]
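The assert-is-not-None lines added above do not change behavior: together with the new "-> None" annotations, they let a static checker such as mypy narrow Optional results before attribute access. A minimal standalone sketch of the idiom, with a hypothetical helper:

    from typing import Optional

    def first_line(source: Optional[str]) -> str:
        assert source is not None  # narrows Optional[str] to str for mypy
        return source.splitlines()[0]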
diff --git a/testing/code/test_excinfo.py b/testing/code/test_excinfo.py
index 5673b811b..997b14e2f 100644
--- a/testing/code/test_excinfo.py
+++ b/testing/code/test_excinfo.py
@@ -3,6 +3,7 @@ import os
import queue
import sys
import textwrap
+from typing import Union
import py
@@ -59,9 +60,9 @@ def test_excinfo_getstatement():
except ValueError:
excinfo = _pytest._code.ExceptionInfo.from_current()
linenumbers = [
- _pytest._code.getrawcode(f).co_firstlineno - 1 + 4,
- _pytest._code.getrawcode(f).co_firstlineno - 1 + 1,
- _pytest._code.getrawcode(g).co_firstlineno - 1 + 1,
+ f.__code__.co_firstlineno - 1 + 4,
+ f.__code__.co_firstlineno - 1 + 1,
+ g.__code__.co_firstlineno - 1 + 1,
]
values = list(excinfo.traceback)
foundlinenumbers = [x.lineno for x in values]
@@ -224,23 +225,25 @@ class TestTraceback_f_g_h:
repr = excinfo.getrepr()
assert "RuntimeError: hello" in str(repr.reprcrash)
- def test_traceback_no_recursion_index(self):
- def do_stuff():
+ def test_traceback_no_recursion_index(self) -> None:
+ def do_stuff() -> None:
raise RuntimeError
- def reraise_me():
+ def reraise_me() -> None:
import sys
exc, val, tb = sys.exc_info()
+ assert val is not None
raise val.with_traceback(tb)
- def f(n):
+ def f(n: int) -> None:
try:
do_stuff()
except: # noqa
reraise_me()
excinfo = pytest.raises(RuntimeError, f, 8)
+ assert excinfo is not None
traceback = excinfo.traceback
recindex = traceback.recursionindex()
assert recindex is None
@@ -316,8 +319,19 @@ def test_excinfo_exconly():
def test_excinfo_repr_str():
excinfo = pytest.raises(ValueError, h)
- assert repr(excinfo) == "<ExceptionInfo ValueError tblen=4>"
- assert str(excinfo) == "<ExceptionInfo ValueError tblen=4>"
+ assert repr(excinfo) == "<ExceptionInfo ValueError() tblen=4>"
+ assert str(excinfo) == "<ExceptionInfo ValueError() tblen=4>"
+
+ class CustomException(Exception):
+ def __repr__(self):
+ return "custom_repr"
+
+ def raises():
+ raise CustomException()
+
+ excinfo = pytest.raises(CustomException, raises)
+ assert repr(excinfo) == "<ExceptionInfo custom_repr tblen=2>"
+ assert str(excinfo) == "<ExceptionInfo custom_repr tblen=2>"
def test_excinfo_for_later():
@@ -399,7 +413,7 @@ def test_match_raises_error(testdir):
result = testdir.runpytest()
assert result.ret != 0
result.stdout.fnmatch_lines(["*AssertionError*Pattern*[123]*not found*"])
- assert "__tracebackhide__ = True" not in result.stdout.str()
+ result.stdout.no_fnmatch_line("*__tracebackhide__ = True*")
result = testdir.runpytest("--fulltrace")
assert result.ret != 0
@@ -491,65 +505,18 @@ raise ValueError()
assert repr.reprtraceback.reprentries[1].lines[0] == "> ???"
assert repr.chain[0][0].reprentries[1].lines[0] == "> ???"
- def test_repr_source_failing_fullsource(self):
+ def test_repr_source_failing_fullsource(self, monkeypatch) -> None:
pr = FormattedExcinfo()
- class FakeCode:
- class raw:
- co_filename = "?"
-
- path = "?"
- firstlineno = 5
-
- def fullsource(self):
- return None
-
- fullsource = property(fullsource)
-
- class FakeFrame:
- code = FakeCode()
- f_locals = {}
- f_globals = {}
-
- class FakeTracebackEntry(_pytest._code.Traceback.Entry):
- def __init__(self, tb, excinfo=None):
- self.lineno = 5 + 3
-
- @property
- def frame(self):
- return FakeFrame()
-
- class Traceback(_pytest._code.Traceback):
- Entry = FakeTracebackEntry
-
- class FakeExcinfo(_pytest._code.ExceptionInfo):
- typename = "Foo"
- value = Exception()
-
- def __init__(self):
- pass
-
- def exconly(self, tryshort):
- return "EXC"
-
- def errisinstance(self, cls):
- return False
-
- excinfo = FakeExcinfo()
-
- class FakeRawTB:
- tb_next = None
-
- tb = FakeRawTB()
- excinfo.traceback = Traceback(tb)
+ try:
+ 1 / 0
+ except ZeroDivisionError:
+ excinfo = ExceptionInfo.from_current()
- fail = IOError()
- repr = pr.repr_excinfo(excinfo)
- assert repr.reprtraceback.reprentries[0].lines[0] == "> ???"
- assert repr.chain[0][0].reprentries[0].lines[0] == "> ???"
+ with monkeypatch.context() as m:
+ m.setattr(_pytest._code.Code, "fullsource", property(lambda self: None))
+ repr = pr.repr_excinfo(excinfo)
- fail = py.error.ENOENT # noqa
- repr = pr.repr_excinfo(excinfo)
assert repr.reprtraceback.reprentries[0].lines[0] == "> ???"
assert repr.chain[0][0].reprentries[0].lines[0] == "> ???"
@@ -573,7 +540,7 @@ raise ValueError()
reprlocals = p.repr_locals(loc)
assert reprlocals.lines
assert reprlocals.lines[0] == "__builtins__ = <builtins>"
- assert '[NotImplementedError("") raised in repr()]' in reprlocals.lines[1]
+ assert "[NotImplementedError() raised in repr()]" in reprlocals.lines[1]
def test_repr_local_with_exception_in_class_property(self):
class ExceptionWithBrokenClass(Exception):
@@ -591,7 +558,7 @@ raise ValueError()
reprlocals = p.repr_locals(loc)
assert reprlocals.lines
assert reprlocals.lines[0] == "__builtins__ = <builtins>"
- assert '[ExceptionWithBrokenClass("") raised in repr()]' in reprlocals.lines[1]
+ assert "[ExceptionWithBrokenClass() raised in repr()]" in reprlocals.lines[1]
def test_repr_local_truncated(self):
loc = {"l": [i for i in range(10)]}
@@ -632,7 +599,6 @@ raise ValueError()
assert lines[3] == "E world"
assert not lines[4:]
- loc = repr_entry.reprlocals is not None
loc = repr_entry.reprfileloc
assert loc.path == mod.__file__
assert loc.lineno == 3
@@ -891,7 +857,7 @@ raise ValueError()
from _pytest._code.code import TerminalRepr
class MyRepr(TerminalRepr):
- def toterminal(self, tw):
+ def toterminal(self, tw) -> None:
tw.line("я")
x = str(MyRepr())
@@ -1218,13 +1184,15 @@ raise ValueError()
@pytest.mark.parametrize(
"reason, description",
[
- (
+ pytest.param(
"cause",
"The above exception was the direct cause of the following exception:",
+ id="cause",
),
- (
+ pytest.param(
"context",
"During handling of the above exception, another exception occurred:",
+ id="context",
),
],
)
@@ -1320,9 +1288,10 @@ raise ValueError()
@pytest.mark.parametrize("style", ["short", "long"])
@pytest.mark.parametrize("encoding", [None, "utf8", "utf16"])
def test_repr_traceback_with_unicode(style, encoding):
- msg = "☹"
- if encoding is not None:
- msg = msg.encode(encoding)
+ if encoding is None:
+ msg = "☹" # type: Union[str, bytes]
+ else:
+ msg = "☹".encode(encoding)
try:
raise RuntimeError(msg)
except RuntimeError:
@@ -1343,7 +1312,8 @@ def test_cwd_deleted(testdir):
)
result = testdir.runpytest()
result.stdout.fnmatch_lines(["* 1 failed in *"])
- assert "INTERNALERROR" not in result.stdout.str() + result.stderr.str()
+ result.stdout.no_fnmatch_line("*INTERNALERROR*")
+ result.stderr.no_fnmatch_line("*INTERNALERROR*")
@pytest.mark.usefixtures("limited_recursion_depth")
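The updated repr assertions reflect that ExceptionInfo now embeds repr() of the exception value rather than only the type name, so a custom __repr__ shows through. A minimal sketch with a hypothetical function; the tblen suffix varies with call depth:

    import pytest

    def boom():
        raise ValueError("msg")

    excinfo = pytest.raises(ValueError, boom)
    # e.g. "<ExceptionInfo ValueError('msg') tblen=2>"
    assert repr(excinfo).startswith("<ExceptionInfo ValueError('msg')")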
diff --git a/testing/code/test_source.py b/testing/code/test_source.py
index 15e0bf24a..bf52dccd7 100644
--- a/testing/code/test_source.py
+++ b/testing/code/test_source.py
@@ -4,13 +4,16 @@
import ast
import inspect
import sys
+from typing import Any
+from typing import Dict
+from typing import Optional
import _pytest._code
import pytest
from _pytest._code import Source
-def test_source_str_function():
+def test_source_str_function() -> None:
x = Source("3")
assert str(x) == "3"
@@ -25,7 +28,7 @@ def test_source_str_function():
assert str(x) == "\n3"
-def test_unicode():
+def test_unicode() -> None:
x = Source("4")
assert str(x) == "4"
co = _pytest._code.compile('"å"', mode="eval")
@@ -33,12 +36,12 @@ def test_unicode():
assert isinstance(val, str)
-def test_source_from_function():
+def test_source_from_function() -> None:
source = _pytest._code.Source(test_source_str_function)
- assert str(source).startswith("def test_source_str_function():")
+ assert str(source).startswith("def test_source_str_function() -> None:")
-def test_source_from_method():
+def test_source_from_method() -> None:
class TestClass:
def test_method(self):
pass
@@ -47,13 +50,13 @@ def test_source_from_method():
assert source.lines == ["def test_method(self):", " pass"]
-def test_source_from_lines():
+def test_source_from_lines() -> None:
lines = ["a \n", "b\n", "c"]
source = _pytest._code.Source(lines)
assert source.lines == ["a ", "b", "c"]
-def test_source_from_inner_function():
+def test_source_from_inner_function() -> None:
def f():
pass
@@ -63,7 +66,7 @@ def test_source_from_inner_function():
assert str(source).startswith("def f():")
-def test_source_putaround_simple():
+def test_source_putaround_simple() -> None:
source = Source("raise ValueError")
source = source.putaround(
"try:",
@@ -85,7 +88,7 @@ else:
)
-def test_source_putaround():
+def test_source_putaround() -> None:
source = Source()
source = source.putaround(
"""
@@ -96,28 +99,29 @@ def test_source_putaround():
assert str(source).strip() == "if 1:\n x=1"
-def test_source_strips():
+def test_source_strips() -> None:
source = Source("")
assert source == Source()
assert str(source) == ""
assert source.strip() == source
-def test_source_strip_multiline():
+def test_source_strip_multiline() -> None:
source = Source()
source.lines = ["", " hello", " "]
source2 = source.strip()
assert source2.lines == [" hello"]
-def test_syntaxerror_rerepresentation():
+def test_syntaxerror_rerepresentation() -> None:
ex = pytest.raises(SyntaxError, _pytest._code.compile, "xyz xyz")
+ assert ex is not None
assert ex.value.lineno == 1
assert ex.value.offset in {5, 7} # cpython: 7, pypy3.6 7.1.1: 5
- assert ex.value.text.strip(), "x x"
+ assert ex.value.text == "xyz xyz\n"
-def test_isparseable():
+def test_isparseable() -> None:
assert Source("hello").isparseable()
assert Source("if 1:\n pass").isparseable()
assert Source(" \nif 1:\n pass").isparseable()
@@ -127,56 +131,58 @@ def test_isparseable():
class TestAccesses:
- source = Source(
- """\
- def f(x):
- pass
- def g(x):
- pass
- """
- )
+ def setup_class(self) -> None:
+ self.source = Source(
+ """\
+ def f(x):
+ pass
+ def g(x):
+ pass
+ """
+ )
- def test_getrange(self):
+ def test_getrange(self) -> None:
x = self.source[0:2]
assert x.isparseable()
assert len(x.lines) == 2
assert str(x) == "def f(x):\n pass"
- def test_getline(self):
+ def test_getline(self) -> None:
x = self.source[0]
assert x == "def f(x):"
- def test_len(self):
+ def test_len(self) -> None:
assert len(self.source) == 4
- def test_iter(self):
+ def test_iter(self) -> None:
values = [x for x in self.source]
assert len(values) == 4
class TestSourceParsingAndCompiling:
- source = Source(
- """\
- def f(x):
- assert (x ==
- 3 +
- 4)
- """
- ).strip()
+ def setup_class(self) -> None:
+ self.source = Source(
+ """\
+ def f(x):
+ assert (x ==
+ 3 +
+ 4)
+ """
+ ).strip()
- def test_compile(self):
+ def test_compile(self) -> None:
co = _pytest._code.compile("x=3")
- d = {}
+ d = {} # type: Dict[str, Any]
exec(co, d)
assert d["x"] == 3
- def test_compile_and_getsource_simple(self):
+ def test_compile_and_getsource_simple(self) -> None:
co = _pytest._code.compile("x=3")
exec(co)
source = _pytest._code.Source(co)
assert str(source) == "x=3"
- def test_compile_and_getsource_through_same_function(self):
+ def test_compile_and_getsource_through_same_function(self) -> None:
def gensource(source):
return _pytest._code.compile(source)
@@ -197,7 +203,7 @@ class TestSourceParsingAndCompiling:
source2 = inspect.getsource(co2)
assert "ValueError" in source2
- def test_getstatement(self):
+ def test_getstatement(self) -> None:
# print str(self.source)
ass = str(self.source[1:])
for i in range(1, 4):
@@ -206,7 +212,7 @@ class TestSourceParsingAndCompiling:
# x = s.deindent()
assert str(s) == ass
- def test_getstatementrange_triple_quoted(self):
+ def test_getstatementrange_triple_quoted(self) -> None:
# print str(self.source)
source = Source(
"""hello('''
@@ -217,7 +223,7 @@ class TestSourceParsingAndCompiling:
s = source.getstatement(1)
assert s == str(source)
- def test_getstatementrange_within_constructs(self):
+ def test_getstatementrange_within_constructs(self) -> None:
source = Source(
"""\
try:
@@ -239,7 +245,7 @@ class TestSourceParsingAndCompiling:
# assert source.getstatementrange(5) == (0, 7)
assert source.getstatementrange(6) == (6, 7)
- def test_getstatementrange_bug(self):
+ def test_getstatementrange_bug(self) -> None:
source = Source(
"""\
try:
@@ -253,7 +259,7 @@ class TestSourceParsingAndCompiling:
assert len(source) == 6
assert source.getstatementrange(2) == (1, 4)
- def test_getstatementrange_bug2(self):
+ def test_getstatementrange_bug2(self) -> None:
source = Source(
"""\
assert (
@@ -270,7 +276,7 @@ class TestSourceParsingAndCompiling:
assert len(source) == 9
assert source.getstatementrange(5) == (0, 9)
- def test_getstatementrange_ast_issue58(self):
+ def test_getstatementrange_ast_issue58(self) -> None:
source = Source(
"""\
@@ -284,38 +290,44 @@ class TestSourceParsingAndCompiling:
assert getstatement(2, source).lines == source.lines[2:3]
assert getstatement(3, source).lines == source.lines[3:4]
- def test_getstatementrange_out_of_bounds_py3(self):
+ def test_getstatementrange_out_of_bounds_py3(self) -> None:
source = Source("if xxx:\n from .collections import something")
r = source.getstatementrange(1)
assert r == (1, 2)
- def test_getstatementrange_with_syntaxerror_issue7(self):
+ def test_getstatementrange_with_syntaxerror_issue7(self) -> None:
source = Source(":")
pytest.raises(SyntaxError, lambda: source.getstatementrange(0))
- def test_compile_to_ast(self):
+ def test_compile_to_ast(self) -> None:
source = Source("x = 4")
mod = source.compile(flag=ast.PyCF_ONLY_AST)
assert isinstance(mod, ast.Module)
compile(mod, "<filename>", "exec")
- def test_compile_and_getsource(self):
+ def test_compile_and_getsource(self) -> None:
co = self.source.compile()
exec(co, globals())
- f(7)
- excinfo = pytest.raises(AssertionError, f, 6)
+ f(7) # type: ignore
+ excinfo = pytest.raises(AssertionError, f, 6) # type: ignore
+ assert excinfo is not None
frame = excinfo.traceback[-1].frame
+ assert isinstance(frame.code.fullsource, Source)
stmt = frame.code.fullsource.getstatement(frame.lineno)
assert str(stmt).strip().startswith("assert")
@pytest.mark.parametrize("name", ["", None, "my"])
- def test_compilefuncs_and_path_sanity(self, name):
+ def test_compilefuncs_and_path_sanity(self, name: Optional[str]) -> None:
def check(comp, name):
co = comp(self.source, name)
if not name:
- expected = "codegen %s:%d>" % (mypath, mylineno + 2 + 2)
+ expected = "codegen %s:%d>" % (mypath, mylineno + 2 + 2) # type: ignore
else:
- expected = "codegen %r %s:%d>" % (name, mypath, mylineno + 2 + 2)
+ expected = "codegen %r %s:%d>" % (
+ name,
+ mypath, # type: ignore
+ mylineno + 2 + 2, # type: ignore
+ ) # type: ignore
fn = co.co_filename
assert fn.endswith(expected)
@@ -330,9 +342,9 @@ class TestSourceParsingAndCompiling:
pytest.raises(SyntaxError, _pytest._code.compile, "lambda a,a: 0", mode="eval")
-def test_getstartingblock_singleline():
+def test_getstartingblock_singleline() -> None:
class A:
- def __init__(self, *args):
+ def __init__(self, *args) -> None:
frame = sys._getframe(1)
self.source = _pytest._code.Frame(frame).statement
@@ -342,22 +354,22 @@ def test_getstartingblock_singleline():
assert len(values) == 1
-def test_getline_finally():
- def c():
+def test_getline_finally() -> None:
+ def c() -> None:
pass
with pytest.raises(TypeError) as excinfo:
teardown = None
try:
- c(1)
+ c(1) # type: ignore
finally:
if teardown:
teardown()
source = excinfo.traceback[-1].statement
- assert str(source).strip() == "c(1)"
+ assert str(source).strip() == "c(1) # type: ignore"
-def test_getfuncsource_dynamic():
+def test_getfuncsource_dynamic() -> None:
source = """
def f():
raise ValueError
@@ -366,11 +378,13 @@ def test_getfuncsource_dynamic():
"""
co = _pytest._code.compile(source)
exec(co, globals())
- assert str(_pytest._code.Source(f)).strip() == "def f():\n raise ValueError"
- assert str(_pytest._code.Source(g)).strip() == "def g(): pass"
+ f_source = _pytest._code.Source(f) # type: ignore
+ g_source = _pytest._code.Source(g) # type: ignore
+ assert str(f_source).strip() == "def f():\n raise ValueError"
+ assert str(g_source).strip() == "def g(): pass"
-def test_getfuncsource_with_multine_string():
+def test_getfuncsource_with_multine_string() -> None:
def f():
c = """while True:
pass
@@ -385,7 +399,7 @@ def test_getfuncsource_with_multine_string():
assert str(_pytest._code.Source(f)) == expected.rstrip()
-def test_deindent():
+def test_deindent() -> None:
from _pytest._code.source import deindent as deindent
assert deindent(["\tfoo", "\tbar"]) == ["foo", "bar"]
@@ -399,7 +413,7 @@ def test_deindent():
assert lines == ["def f():", " def g():", " pass"]
-def test_source_of_class_at_eof_without_newline(tmpdir, _sys_snapshot):
+def test_source_of_class_at_eof_without_newline(tmpdir, _sys_snapshot) -> None:
# this test fails because the implicit inspect.getsource(A) below
# does not return the "x = 1" last line.
source = _pytest._code.Source(
@@ -421,7 +435,7 @@ if True:
pass
-def test_getsource_fallback():
+def test_getsource_fallback() -> None:
from _pytest._code.source import getsource
expected = """def x():
@@ -430,7 +444,7 @@ def test_getsource_fallback():
assert src == expected
-def test_idem_compile_and_getsource():
+def test_idem_compile_and_getsource() -> None:
from _pytest._code.source import getsource
expected = "def x(): pass"
@@ -439,15 +453,16 @@ def test_idem_compile_and_getsource():
assert src == expected
-def test_findsource_fallback():
+def test_findsource_fallback() -> None:
from _pytest._code.source import findsource
src, lineno = findsource(x)
+ assert src is not None
assert "test_findsource_simple" in str(src)
assert src[lineno] == " def x():"
-def test_findsource():
+def test_findsource() -> None:
from _pytest._code.source import findsource
co = _pytest._code.compile(
@@ -458,25 +473,27 @@ def test_findsource():
)
src, lineno = findsource(co)
+ assert src is not None
assert "if 1:" in str(src)
- d = {}
+ d = {} # type: Dict[str, Any]
eval(co, d)
src, lineno = findsource(d["x"])
+ assert src is not None
assert "if 1:" in str(src)
assert src[lineno] == " def x():"
-def test_getfslineno():
+def test_getfslineno() -> None:
from _pytest._code import getfslineno
- def f(x):
+ def f(x) -> None:
pass
fspath, lineno = getfslineno(f)
assert fspath.basename == "test_source.py"
- assert lineno == _pytest._code.getrawcode(f).co_firstlineno - 1 # see findsource
+ assert lineno == f.__code__.co_firstlineno - 1 # see findsource
class A:
pass
@@ -496,40 +513,40 @@ def test_getfslineno():
assert getfslineno(B)[1] == -1
-def test_code_of_object_instance_with_call():
+def test_code_of_object_instance_with_call() -> None:
class A:
pass
pytest.raises(TypeError, lambda: _pytest._code.Source(A()))
class WithCall:
- def __call__(self):
+ def __call__(self) -> None:
pass
code = _pytest._code.Code(WithCall())
assert "pass" in str(code.source())
class Hello:
- def __call__(self):
+ def __call__(self) -> None:
pass
pytest.raises(TypeError, lambda: _pytest._code.Code(Hello))
-def getstatement(lineno, source):
+def getstatement(lineno: int, source) -> Source:
from _pytest._code.source import getstatementrange_ast
- source = _pytest._code.Source(source, deindent=False)
- ast, start, end = getstatementrange_ast(lineno, source)
- return source[start:end]
+ src = _pytest._code.Source(source, deindent=False)
+ ast, start, end = getstatementrange_ast(lineno, src)
+ return src[start:end]
-def test_oneline():
+def test_oneline() -> None:
source = getstatement(0, "raise ValueError")
assert str(source) == "raise ValueError"
-def test_comment_and_no_newline_at_end():
+def test_comment_and_no_newline_at_end() -> None:
from _pytest._code.source import getstatementrange_ast
source = Source(
@@ -543,12 +560,12 @@ def test_comment_and_no_newline_at_end():
assert end == 2
-def test_oneline_and_comment():
+def test_oneline_and_comment() -> None:
source = getstatement(0, "raise ValueError\n#hello")
assert str(source) == "raise ValueError"
-def test_comments():
+def test_comments() -> None:
source = '''def test():
"comment 1"
x = 1
@@ -574,7 +591,7 @@ comment 4
assert str(getstatement(line, source)) == '"""\ncomment 4\n"""'
-def test_comment_in_statement():
+def test_comment_in_statement() -> None:
source = """test(foo=1,
# comment 1
bar=2)
@@ -586,17 +603,17 @@ def test_comment_in_statement():
)
-def test_single_line_else():
+def test_single_line_else() -> None:
source = getstatement(1, "if False: 2\nelse: 3")
assert str(source) == "else: 3"
-def test_single_line_finally():
+def test_single_line_finally() -> None:
source = getstatement(1, "try: 1\nfinally: 3")
assert str(source) == "finally: 3"
-def test_issue55():
+def test_issue55() -> None:
source = (
"def round_trip(dinp):\n assert 1 == dinp\n"
'def test_rt():\n round_trip("""\n""")\n'
@@ -605,7 +622,7 @@ def test_issue55():
assert str(s) == ' round_trip("""\n""")'
-def test_multiline():
+def test_multiline() -> None:
source = getstatement(
0,
"""\
@@ -619,7 +636,8 @@ x = 3
class TestTry:
- source = """\
+ def setup_class(self) -> None:
+ self.source = """\
try:
raise ValueError
except Something:
@@ -628,42 +646,44 @@ else:
raise KeyError()
"""
- def test_body(self):
+ def test_body(self) -> None:
source = getstatement(1, self.source)
assert str(source) == " raise ValueError"
- def test_except_line(self):
+ def test_except_line(self) -> None:
source = getstatement(2, self.source)
assert str(source) == "except Something:"
- def test_except_body(self):
+ def test_except_body(self) -> None:
source = getstatement(3, self.source)
assert str(source) == " raise IndexError(1)"
- def test_else(self):
+ def test_else(self) -> None:
source = getstatement(5, self.source)
assert str(source) == " raise KeyError()"
class TestTryFinally:
- source = """\
+ def setup_class(self) -> None:
+ self.source = """\
try:
raise ValueError
finally:
raise IndexError(1)
"""
- def test_body(self):
+ def test_body(self) -> None:
source = getstatement(1, self.source)
assert str(source) == " raise ValueError"
- def test_finally(self):
+ def test_finally(self) -> None:
source = getstatement(3, self.source)
assert str(source) == " raise IndexError(1)"
class TestIf:
- source = """\
+ def setup_class(self) -> None:
+ self.source = """\
if 1:
y = 3
elif False:
@@ -672,24 +692,24 @@ else:
y = 7
"""
- def test_body(self):
+ def test_body(self) -> None:
source = getstatement(1, self.source)
assert str(source) == " y = 3"
- def test_elif_clause(self):
+ def test_elif_clause(self) -> None:
source = getstatement(2, self.source)
assert str(source) == "elif False:"
- def test_elif(self):
+ def test_elif(self) -> None:
source = getstatement(3, self.source)
assert str(source) == " y = 5"
- def test_else(self):
+ def test_else(self) -> None:
source = getstatement(5, self.source)
assert str(source) == " y = 7"
-def test_semicolon():
+def test_semicolon() -> None:
s = """\
hello ; pytest.skip()
"""
@@ -697,7 +717,7 @@ hello ; pytest.skip()
assert str(source) == s.strip()
-def test_def_online():
+def test_def_online() -> None:
s = """\
def func(): raise ValueError(42)
@@ -708,7 +728,7 @@ def something():
assert str(source) == "def func(): raise ValueError(42)"
-def XXX_test_expression_multiline():
+def XXX_test_expression_multiline() -> None:
source = """\
something
'''
@@ -717,7 +737,7 @@ something
assert str(result) == "'''\n'''"
-def test_getstartingblock_multiline():
+def test_getstartingblock_multiline() -> None:
class A:
def __init__(self, *args):
frame = sys._getframe(1)
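Several classes above move their shared Source objects from class-body attributes into setup_class. pytest invokes setup_class with the class object itself, so assignments on the first parameter become class attributes visible to every test. A minimal sketch with a hypothetical class:

    from _pytest._code import Source

    class TestExample:
        def setup_class(self) -> None:
            # "self" is the class here, so this is a class attribute
            self.source = Source("def f(x):\n    pass")

        def test_first_line(self) -> None:
            assert self.source[0] == "def f(x):"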
diff --git a/testing/conftest.py b/testing/conftest.py
index a03efb0cf..8b0430f69 100644
--- a/testing/conftest.py
+++ b/testing/conftest.py
@@ -39,9 +39,12 @@ def pytest_collection_modifyitems(config, items):
neutral_items.append(item)
else:
if "testdir" in fixtures:
- if spawn_names.intersection(item.function.__code__.co_names):
+ co_names = item.function.__code__.co_names
+ if spawn_names.intersection(co_names):
item.add_marker(pytest.mark.uses_pexpect)
slowest_items.append(item)
+ elif "runpytest_subprocess" in co_names:
+ slowest_items.append(item)
else:
slow_items.append(item)
item.add_marker(pytest.mark.slow)
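This conftest hunk folds tests whose code references runpytest_subprocess into the slowest bucket when collected items are reordered. A simplified standalone sketch of the same idea (the real hook keeps several buckets and adds markers as well):

    def pytest_collection_modifyitems(config, items):
        # tests that spawn a pytest subprocess dominate wall-clock time
        slowest = [
            item
            for item in items
            if "runpytest_subprocess" in item.function.__code__.co_names
        ]
        rest = [item for item in items if item not in slowest]
        items[:] = rest + slowest  # reorder in place; pytest runs the list as given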
diff --git a/testing/deprecated_test.py b/testing/deprecated_test.py
index b8a22428f..5390d038d 100644
--- a/testing/deprecated_test.py
+++ b/testing/deprecated_test.py
@@ -16,7 +16,7 @@ def test_resultlog_is_deprecated(testdir):
result = testdir.runpytest("--result-log=%s" % testdir.tmpdir.join("result.log"))
result.stdout.fnmatch_lines(
[
- "*--result-log is deprecated and scheduled for removal in pytest 6.0*",
+ "*--result-log is deprecated, please try the new pytest-reportlog plugin.",
"*See https://docs.pytest.org/en/latest/deprecations.html#result-log-result-log for more information*",
]
)
@@ -44,3 +44,32 @@ def test_external_plugins_integrated(testdir, plugin):
with pytest.warns(pytest.PytestConfigWarning):
testdir.parseconfig("-p", plugin)
+
+
+@pytest.mark.parametrize("junit_family", [None, "legacy", "xunit2"])
+def test_warn_about_imminent_junit_family_default_change(testdir, junit_family):
+ """Show a warning if junit_family is not defined and --junitxml is used (#6179)"""
+ testdir.makepyfile(
+ """
+ def test_foo():
+ pass
+ """
+ )
+ if junit_family:
+ testdir.makeini(
+ """
+ [pytest]
+ junit_family={junit_family}
+ """.format(
+ junit_family=junit_family
+ )
+ )
+
+ result = testdir.runpytest("--junit-xml=foo.xml")
+ warning_msg = (
+ "*PytestDeprecationWarning: The 'junit_family' default value will change*"
+ )
+ if junit_family:
+ result.stdout.no_fnmatch_line(warning_msg)
+ else:
+ result.stdout.fnmatch_lines([warning_msg])
diff --git a/testing/io/test_saferepr.py b/testing/io/test_saferepr.py
index 86897b57c..e24d9b470 100644
--- a/testing/io/test_saferepr.py
+++ b/testing/io/test_saferepr.py
@@ -1,3 +1,4 @@
+import pytest
from _pytest._io.saferepr import saferepr
@@ -40,9 +41,81 @@ def test_exceptions():
assert "TypeError" in s
assert "TypeError" in saferepr(BrokenRepr("string"))
- s2 = saferepr(BrokenRepr(BrokenReprException("omg even worse")))
- assert "NameError" not in s2
- assert "unknown" in s2
+ none = None
+ try:
+ none()
+ except BaseException as exc:
+ exp_exc = repr(exc)
+ obj = BrokenRepr(BrokenReprException("omg even worse"))
+ s2 = saferepr(obj)
+ assert s2 == (
+ "<[unpresentable exception ({!s}) raised in repr()] BrokenRepr object at 0x{:x}>".format(
+ exp_exc, id(obj)
+ )
+ )
+
+
+def test_baseexception():
+ """Test saferepr() with BaseExceptions, which includes pytest outcomes."""
+
+ class RaisingOnStrRepr(BaseException):
+ def __init__(self, exc_types):
+ self.exc_types = exc_types
+
+ def raise_exc(self, *args):
+ try:
+ self.exc_type = self.exc_types.pop(0)
+ except IndexError:
+ pass
+ if hasattr(self.exc_type, "__call__"):
+ raise self.exc_type(*args)
+ raise self.exc_type
+
+ def __str__(self):
+ self.raise_exc("__str__")
+
+ def __repr__(self):
+ self.raise_exc("__repr__")
+
+ class BrokenObj:
+ def __init__(self, exc):
+ self.exc = exc
+
+ def __repr__(self):
+ raise self.exc
+
+ __str__ = __repr__
+
+ baseexc_str = BaseException("__str__")
+ obj = BrokenObj(RaisingOnStrRepr([BaseException]))
+ assert saferepr(obj) == (
+ "<[unpresentable exception ({!r}) "
+ "raised in repr()] BrokenObj object at 0x{:x}>".format(baseexc_str, id(obj))
+ )
+ obj = BrokenObj(RaisingOnStrRepr([RaisingOnStrRepr([BaseException])]))
+ assert saferepr(obj) == (
+ "<[{!r} raised in repr()] BrokenObj object at 0x{:x}>".format(
+ baseexc_str, id(obj)
+ )
+ )
+
+ with pytest.raises(KeyboardInterrupt):
+ saferepr(BrokenObj(KeyboardInterrupt()))
+
+ with pytest.raises(SystemExit):
+ saferepr(BrokenObj(SystemExit()))
+
+ with pytest.raises(KeyboardInterrupt):
+ saferepr(BrokenObj(RaisingOnStrRepr([KeyboardInterrupt])))
+
+ with pytest.raises(SystemExit):
+ saferepr(BrokenObj(RaisingOnStrRepr([SystemExit])))
+
+ with pytest.raises(KeyboardInterrupt):
+ print(saferepr(BrokenObj(RaisingOnStrRepr([BaseException, KeyboardInterrupt]))))
+
+ with pytest.raises(SystemExit):
+ saferepr(BrokenObj(RaisingOnStrRepr([BaseException, SystemExit])))
def test_buggy_builtin_repr():
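The new tests above pin down saferepr's contract for misbehaving __repr__ implementations: ordinary exceptions are swallowed into a placeholder string, while KeyboardInterrupt and SystemExit propagate. A minimal sketch of the benign case with a hypothetical class:

    from _pytest._io.saferepr import saferepr

    class Broken:
        def __repr__(self):
            raise RuntimeError("boom")

    # Yields a placeholder instead of raising, along the lines of
    # "<[RuntimeError('boom') raised in repr()] Broken object at 0x...>"
    assert "raised in repr()" in saferepr(Broken())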
diff --git a/testing/logging/test_fixture.py b/testing/logging/test_fixture.py
index 5d2ff4654..c68866bef 100644
--- a/testing/logging/test_fixture.py
+++ b/testing/logging/test_fixture.py
@@ -46,7 +46,7 @@ def test_change_level_undo(testdir):
)
result = testdir.runpytest()
result.stdout.fnmatch_lines(["*log from test1*", "*2 failed in *"])
- assert "log from test2" not in result.stdout.str()
+ result.stdout.no_fnmatch_line("*log from test2*")
def test_with_statement(caplog):
diff --git a/testing/logging/test_formatter.py b/testing/logging/test_formatter.py
index 6850a83cd..b363e8b03 100644
--- a/testing/logging/test_formatter.py
+++ b/testing/logging/test_formatter.py
@@ -53,13 +53,77 @@ def test_multiline_message():
# this is called by logging.Formatter.format
record.message = record.getMessage()
- style = PercentStyleMultiline(logfmt)
- output = style.format(record)
+ ai_on_style = PercentStyleMultiline(logfmt, True)
+ output = ai_on_style.format(record)
assert output == (
"dummypath 10 INFO Test Message line1\n"
" line2"
)
+ ai_off_style = PercentStyleMultiline(logfmt, False)
+ output = ai_off_style.format(record)
+ assert output == (
+ "dummypath 10 INFO Test Message line1\nline2"
+ )
+
+ ai_none_style = PercentStyleMultiline(logfmt, None)
+ output = ai_none_style.format(record)
+ assert output == (
+ "dummypath 10 INFO Test Message line1\nline2"
+ )
+
+ record.auto_indent = False
+ output = ai_on_style.format(record)
+ assert output == (
+ "dummypath 10 INFO Test Message line1\nline2"
+ )
+
+ record.auto_indent = True
+ output = ai_off_style.format(record)
+ assert output == (
+ "dummypath 10 INFO Test Message line1\n"
+ " line2"
+ )
+
+ record.auto_indent = "False"
+ output = ai_on_style.format(record)
+ assert output == (
+ "dummypath 10 INFO Test Message line1\nline2"
+ )
+
+ record.auto_indent = "True"
+ output = ai_off_style.format(record)
+ assert output == (
+ "dummypath 10 INFO Test Message line1\n"
+ " line2"
+ )
+
+ # bad string values default to False
+ record.auto_indent = "junk"
+ output = ai_off_style.format(record)
+ assert output == (
+ "dummypath 10 INFO Test Message line1\nline2"
+ )
+
+ # anything other than string or int will default to False
+ record.auto_indent = dict()
+ output = ai_off_style.format(record)
+ assert output == (
+ "dummypath 10 INFO Test Message line1\nline2"
+ )
+
+ record.auto_indent = "5"
+ output = ai_off_style.format(record)
+ assert output == (
+ "dummypath 10 INFO Test Message line1\n line2"
+ )
+
+ record.auto_indent = 5
+ output = ai_off_style.format(record)
+ assert output == (
+ "dummypath 10 INFO Test Message line1\n line2"
+ )
+
def test_colored_short_level():
logfmt = "%(levelname).1s %(message)s"
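The block above exercises the precedence rules for multiline log indentation: the style's constructor argument supplies the default, a per-record auto_indent attribute (bool, int, or their string forms) overrides it, and unparseable values fall back to no indent. A minimal sketch of the internal API as the test drives it (pytest-internal, signature assumed from the hunk, so it may change):

    import logging
    from _pytest.logging import PercentStyleMultiline

    record = logging.LogRecord(
        "root", logging.INFO, "dummypath", 10, "line1\nline2", (), None
    )
    record.message = record.getMessage()  # normally done by Formatter.format
    style = PercentStyleMultiline("%(levelname)-8s %(message)s", True)
    # the continuation line is indented to start under "line1"
    print(style.format(record))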
diff --git a/testing/logging/test_reporting.py b/testing/logging/test_reporting.py
index 1ae0bd783..5b24ef963 100644
--- a/testing/logging/test_reporting.py
+++ b/testing/logging/test_reporting.py
@@ -109,7 +109,7 @@ def test_log_cli_level_log_level_interaction(testdir):
"=* 1 failed in *=",
]
)
- assert "DEBUG" not in result.stdout.str()
+ result.stdout.no_re_match_line("DEBUG")
def test_setup_logging(testdir):
@@ -282,7 +282,7 @@ def test_log_cli_default_level(testdir):
"WARNING*test_log_cli_default_level.py* message will be shown*",
]
)
- assert "INFO message won't be shown" not in result.stdout.str()
+ result.stdout.no_fnmatch_line("*INFO message won't be shown*")
# make sure that we get a '0' exit code for the testsuite
assert result.ret == 0
@@ -566,7 +566,7 @@ def test_log_cli_level(testdir):
"PASSED", # 'PASSED' on its own line because the log message prints a new line
]
)
- assert "This log message won't be shown" not in result.stdout.str()
+ result.stdout.no_fnmatch_line("*This log message won't be shown*")
# make sure that we get a '0' exit code for the testsuite
assert result.ret == 0
@@ -580,7 +580,7 @@ def test_log_cli_level(testdir):
"PASSED", # 'PASSED' on its own line because the log message prints a new line
]
)
- assert "This log message won't be shown" not in result.stdout.str()
+ result.stdout.no_fnmatch_line("*This log message won't be shown*")
# make sure that we get a '0' exit code for the testsuite
assert result.ret == 0
@@ -616,7 +616,7 @@ def test_log_cli_ini_level(testdir):
"PASSED", # 'PASSED' on its own line because the log message prints a new line
]
)
- assert "This log message won't be shown" not in result.stdout.str()
+ result.stdout.no_fnmatch_line("*This log message won't be shown*")
# make sure that we get a '0' exit code for the testsuite
assert result.ret == 0
@@ -942,7 +942,7 @@ def test_collection_collect_only_live_logging(testdir, verbose):
]
)
elif verbose == "-q":
- assert "collected 1 item*" not in result.stdout.str()
+ result.stdout.no_fnmatch_line("*collected 1 item**")
expected_lines.extend(
[
"*test_collection_collect_only_live_logging.py::test_simple*",
@@ -950,7 +950,7 @@ def test_collection_collect_only_live_logging(testdir, verbose):
]
)
elif verbose == "-qq":
- assert "collected 1 item*" not in result.stdout.str()
+ result.stdout.no_fnmatch_line("*collected 1 item**")
expected_lines.extend(["*test_collection_collect_only_live_logging.py: 1*"])
result.stdout.fnmatch_lines(expected_lines)
@@ -983,7 +983,7 @@ def test_collection_logging_to_file(testdir):
result = testdir.runpytest()
- assert "--- live log collection ---" not in result.stdout.str()
+ result.stdout.no_fnmatch_line("*--- live log collection ---*")
assert result.ret == 0
assert os.path.isfile(log_file)
diff --git a/testing/python/approx.py b/testing/python/approx.py
index 0575557ae..f72045624 100644
--- a/testing/python/approx.py
+++ b/testing/python/approx.py
@@ -1,4 +1,3 @@
-import doctest
import operator
from decimal import Decimal
from fractions import Fraction
@@ -11,68 +10,81 @@ from pytest import approx
inf, nan = float("inf"), float("nan")
-class MyDocTestRunner(doctest.DocTestRunner):
- def __init__(self):
- doctest.DocTestRunner.__init__(self)
+@pytest.fixture
+def mocked_doctest_runner(monkeypatch):
+ import doctest
- def report_failure(self, out, test, example, got):
- raise AssertionError(
- "'{}' evaluates to '{}', not '{}'".format(
- example.source.strip(), got.strip(), example.want.strip()
+ class MockedPdb:
+ def __init__(self, out):
+ pass
+
+ def set_trace(self):
+ raise NotImplementedError("not used")
+
+ def reset(self):
+ pass
+
+ def set_continue(self):
+ pass
+
+ monkeypatch.setattr("doctest._OutputRedirectingPdb", MockedPdb)
+
+ class MyDocTestRunner(doctest.DocTestRunner):
+ def report_failure(self, out, test, example, got):
+ raise AssertionError(
+ "'{}' evaluates to '{}', not '{}'".format(
+ example.source.strip(), got.strip(), example.want.strip()
+ )
)
- )
+
+ return MyDocTestRunner()
class TestApprox:
- @pytest.fixture
- def plus_minus(self):
- return "\u00b1"
-
- def test_repr_string(self, plus_minus):
- tol1, tol2, infr = "1.0e-06", "2.0e-06", "inf"
- assert repr(approx(1.0)) == "1.0 {pm} {tol1}".format(pm=plus_minus, tol1=tol1)
- assert repr(
- approx([1.0, 2.0])
- ) == "approx([1.0 {pm} {tol1}, 2.0 {pm} {tol2}])".format(
- pm=plus_minus, tol1=tol1, tol2=tol2
- )
- assert repr(
- approx((1.0, 2.0))
- ) == "approx((1.0 {pm} {tol1}, 2.0 {pm} {tol2}))".format(
- pm=plus_minus, tol1=tol1, tol2=tol2
- )
+ def test_repr_string(self):
+ assert repr(approx(1.0)) == "1.0 ± 1.0e-06"
+ assert repr(approx([1.0, 2.0])) == "approx([1.0 ± 1.0e-06, 2.0 ± 2.0e-06])"
+ assert repr(approx((1.0, 2.0))) == "approx((1.0 ± 1.0e-06, 2.0 ± 2.0e-06))"
assert repr(approx(inf)) == "inf"
- assert repr(approx(1.0, rel=nan)) == "1.0 {pm} ???".format(pm=plus_minus)
- assert repr(approx(1.0, rel=inf)) == "1.0 {pm} {infr}".format(
- pm=plus_minus, infr=infr
- )
- assert repr(approx(1.0j, rel=inf)) == "1j"
+ assert repr(approx(1.0, rel=nan)) == "1.0 ± ???"
+ assert repr(approx(1.0, rel=inf)) == "1.0 ± inf"
# Dictionaries aren't ordered, so we need to check both orders.
assert repr(approx({"a": 1.0, "b": 2.0})) in (
- "approx({{'a': 1.0 {pm} {tol1}, 'b': 2.0 {pm} {tol2}}})".format(
- pm=plus_minus, tol1=tol1, tol2=tol2
- ),
- "approx({{'b': 2.0 {pm} {tol2}, 'a': 1.0 {pm} {tol1}}})".format(
- pm=plus_minus, tol1=tol1, tol2=tol2
- ),
+ "approx({'a': 1.0 ± 1.0e-06, 'b': 2.0 ± 2.0e-06})",
+ "approx({'b': 2.0 ± 2.0e-06, 'a': 1.0 ± 1.0e-06})",
)
+ def test_repr_complex_numbers(self):
+ assert repr(approx(inf + 1j)) == "(inf+1j)"
+ assert repr(approx(1.0j, rel=inf)) == "1j ± inf"
+
+ # can't compute a sensible tolerance
+ assert repr(approx(nan + 1j)) == "(nan+1j) ± ???"
+
+ assert repr(approx(1.0j)) == "1j ± 1.0e-06 ∠ ±180°"
+
+ # relative tolerance is scaled to |3+4j| = 5
+ assert repr(approx(3 + 4 * 1j)) == "(3+4j) ± 5.0e-06 ∠ ±180°"
+
+ # absolute tolerance is not scaled
+ assert repr(approx(3.3 + 4.4 * 1j, abs=0.02)) == "(3.3+4.4j) ± 2.0e-02 ∠ ±180°"
+
@pytest.mark.parametrize(
- "value, repr_string",
+ "value, expected_repr_string",
[
- (5.0, "approx(5.0 {pm} 5.0e-06)"),
- ([5.0], "approx([5.0 {pm} 5.0e-06])"),
- ([[5.0]], "approx([[5.0 {pm} 5.0e-06]])"),
- ([[5.0, 6.0]], "approx([[5.0 {pm} 5.0e-06, 6.0 {pm} 6.0e-06]])"),
- ([[5.0], [6.0]], "approx([[5.0 {pm} 5.0e-06], [6.0 {pm} 6.0e-06]])"),
+ (5.0, "approx(5.0 ± 5.0e-06)"),
+ ([5.0], "approx([5.0 ± 5.0e-06])"),
+ ([[5.0]], "approx([[5.0 ± 5.0e-06]])"),
+ ([[5.0, 6.0]], "approx([[5.0 ± 5.0e-06, 6.0 ± 6.0e-06]])"),
+ ([[5.0], [6.0]], "approx([[5.0 ± 5.0e-06], [6.0 ± 6.0e-06]])"),
],
)
- def test_repr_nd_array(self, plus_minus, value, repr_string):
+ def test_repr_nd_array(self, value, expected_repr_string):
"""Make sure that arrays of all different dimensions are repr'd correctly."""
np = pytest.importorskip("numpy")
np_array = np.array(value)
- assert repr(approx(np_array)) == repr_string.format(pm=plus_minus)
+ assert repr(approx(np_array)) == expected_repr_string
def test_operator_overloading(self):
assert 1 == approx(1, rel=1e-6, abs=1e-12)
@@ -416,13 +428,14 @@ class TestApprox:
assert a12 != approx(a21)
assert a21 != approx(a12)
- def test_doctests(self):
+ def test_doctests(self, mocked_doctest_runner):
+ import doctest
+
parser = doctest.DocTestParser()
test = parser.get_doctest(
approx.__doc__, {"approx": approx}, approx.__name__, None, None
)
- runner = MyDocTestRunner()
- runner.run(test)
+ mocked_doctest_runner.run(test)
def test_unicode_plus_minus(self, testdir):
"""
diff --git a/testing/python/collect.py b/testing/python/collect.py
index f0c12df16..30f9841b5 100644
--- a/testing/python/collect.py
+++ b/testing/python/collect.py
@@ -1139,7 +1139,7 @@ def test_unorderable_types(testdir):
"""
)
result = testdir.runpytest()
- assert "TypeError" not in result.stdout.str()
+ result.stdout.no_fnmatch_line("*TypeError*")
assert result.ret == ExitCode.NO_TESTS_COLLECTED
@@ -1167,7 +1167,7 @@ def test_dont_collect_non_function_callable(testdir):
[
"*collected 1 item*",
"*test_dont_collect_non_function_callable.py:2: *cannot collect 'test_a' because it is not a function*",
- "*1 passed, 1 warnings in *",
+ "*1 passed, 1 warning in *",
]
)
diff --git a/testing/python/fixtures.py b/testing/python/fixtures.py
index 45c56f9a8..52fd32cc4 100644
--- a/testing/python/fixtures.py
+++ b/testing/python/fixtures.py
@@ -455,7 +455,7 @@ class TestFillFixtures:
"*1 error*",
]
)
- assert "INTERNAL" not in result.stdout.str()
+ result.stdout.no_fnmatch_line("*INTERNAL*")
def test_fixture_excinfo_leak(self, testdir):
# on python2 sys.excinfo would leak into fixture executions
@@ -503,7 +503,7 @@ class TestRequestBasic:
assert repr(req).find(req.function.__name__) != -1
def test_request_attributes_method(self, testdir):
- item, = testdir.getitems(
+ (item,) = testdir.getitems(
"""
import pytest
class TestB(object):
@@ -531,7 +531,7 @@ class TestRequestBasic:
pass
"""
)
- item1, = testdir.genitems([modcol])
+ (item1,) = testdir.genitems([modcol])
assert item1.name == "test_method"
arg2fixturedefs = fixtures.FixtureRequest(item1)._arg2fixturedefs
assert len(arg2fixturedefs) == 1
@@ -781,7 +781,7 @@ class TestRequestBasic:
def test_request_getmodulepath(self, testdir):
modcol = testdir.getmodulecol("def test_somefunc(): pass")
- item, = testdir.genitems([modcol])
+ (item,) = testdir.genitems([modcol])
req = fixtures.FixtureRequest(item)
assert req.fspath == modcol.fspath
@@ -2647,7 +2647,7 @@ class TestFixtureMarker:
*3 passed*
"""
)
- assert "error" not in result.stdout.str()
+ result.stdout.no_fnmatch_line("*error*")
def test_fixture_finalizer(self, testdir):
testdir.makeconftest(
@@ -3081,7 +3081,7 @@ class TestErrors:
*KeyError*
*ERROR*teardown*test_2*
*KeyError*
- *3 pass*2 error*
+ *3 pass*2 errors*
"""
)
@@ -3151,7 +3151,7 @@ class TestShowFixtures:
*hello world*
"""
)
- assert "arg0" not in result.stdout.str()
+ result.stdout.no_fnmatch_line("*arg0*")
@pytest.mark.parametrize("testmod", [True, False])
def test_show_fixtures_conftest(self, testdir, testmod):
diff --git a/testing/python/metafunc.py b/testing/python/metafunc.py
index 860b21ff2..1c396c4a7 100644
--- a/testing/python/metafunc.py
+++ b/testing/python/metafunc.py
@@ -12,7 +12,7 @@ from _pytest import python
class TestMetafunc:
- def Metafunc(self, func, config=None):
+ def Metafunc(self, func, config=None) -> python.Metafunc:
# the unit tests of this class check if things work correctly
# on the funcarg level, so we don't need a full blown
# initialization
@@ -23,7 +23,7 @@ class TestMetafunc:
self.names_closure = names
@attr.s
- class DefinitionMock:
+ class DefinitionMock(python.FunctionDefinition):
obj = attr.ib()
names = fixtures.getfuncargnames(func)
@@ -1551,27 +1551,6 @@ class TestMarkersWithParametrization:
assert len(skipped) == 0
assert len(fail) == 0
- @pytest.mark.xfail(reason="is this important to support??")
- def test_nested_marks(self, testdir):
- s = """
- import pytest
- mastermark = pytest.mark.foo(pytest.mark.bar)
-
- @pytest.mark.parametrize(("n", "expected"), [
- (1, 2),
- mastermark((1, 3)),
- (2, 3),
- ])
- def test_increment(n, expected):
- assert n + 1 == expected
- """
- items = testdir.getitems(s)
- assert len(items) == 3
- for mark in ["foo", "bar"]:
- assert mark not in items[0].keywords
- assert mark in items[1].keywords
- assert mark not in items[2].keywords
-
def test_simple_xfail(self, testdir):
s = """
import pytest
diff --git a/testing/python/raises.py b/testing/python/raises.py
index 28b0715c0..1c701796a 100644
--- a/testing/python/raises.py
+++ b/testing/python/raises.py
@@ -205,7 +205,7 @@ class TestRaises:
with pytest.raises(AssertionError) as excinfo:
with pytest.raises(AssertionError, match="'foo"):
raise AssertionError("'bar")
- msg, = excinfo.value.args
+ (msg,) = excinfo.value.args
assert msg == 'Pattern "\'foo" not found in "\'bar"'
def test_raises_match_wrong_type(self):
diff --git a/testing/python/show_fixtures_per_test.py b/testing/python/show_fixtures_per_test.py
index aff8aa0e5..ef841819d 100644
--- a/testing/python/show_fixtures_per_test.py
+++ b/testing/python/show_fixtures_per_test.py
@@ -1,6 +1,6 @@
def test_no_items_should_not_show_output(testdir):
result = testdir.runpytest("--fixtures-per-test")
- assert "fixtures used by" not in result.stdout.str()
+ result.stdout.no_fnmatch_line("*fixtures used by*")
assert result.ret == 0
@@ -30,7 +30,7 @@ def test_fixtures_in_module(testdir):
" arg1 docstring",
]
)
- assert "_arg0" not in result.stdout.str()
+ result.stdout.no_fnmatch_line("*_arg0*")
def test_fixtures_in_conftest(testdir):
diff --git a/testing/test_assertion.py b/testing/test_assertion.py
index 8fce5e279..e4d68ff8c 100644
--- a/testing/test_assertion.py
+++ b/testing/test_assertion.py
@@ -12,13 +12,11 @@ from _pytest.assertion import util
from _pytest.compat import ATTRS_EQ_FIELD
-def mock_config():
+def mock_config(verbose=0):
class Config:
- verbose = False
-
def getoption(self, name):
if name == "verbose":
- return self.verbose
+ return verbose
raise KeyError("Not mocked out: %s" % name)
return Config()
@@ -72,7 +70,14 @@ class TestImportHookInstallation:
"""
)
result = testdir.runpytest_subprocess()
- result.stdout.fnmatch_lines(["*assert 1 == 0*"])
+ result.stdout.fnmatch_lines(
+ [
+ "E * AssertionError: ([[][]], [[][]], [[]<TestReport *>[]])*",
+ "E * assert"
+ " {'failed': 1, 'passed': 0, 'skipped': 0} =="
+ " {'failed': 0, 'passed': 1, 'skipped': 0}",
+ ]
+ )
@pytest.mark.parametrize("mode", ["plain", "rewrite"])
def test_pytest_plugins_rewrite(self, testdir, mode):
@@ -296,9 +301,8 @@ class TestBinReprIntegration:
result.stdout.fnmatch_lines(["*test_hello*FAIL*", "*test_check*PASS*"])
-def callequal(left, right, verbose=False):
- config = mock_config()
- config.verbose = verbose
+def callequal(left, right, verbose=0):
+ config = mock_config(verbose=verbose)
return plugin.pytest_assertrepr_compare(config, "==", left, right)
@@ -322,7 +326,7 @@ class TestAssert_reprcompare:
assert "a" * 50 not in line
def test_text_skipping_verbose(self):
- lines = callequal("a" * 50 + "spam", "a" * 50 + "eggs", verbose=True)
+ lines = callequal("a" * 50 + "spam", "a" * 50 + "eggs", verbose=1)
assert "- " + "a" * 50 + "spam" in lines
assert "+ " + "a" * 50 + "eggs" in lines
@@ -345,7 +349,7 @@ class TestAssert_reprcompare:
def test_bytes_diff_verbose(self):
"""Check special handling for bytes diff (#5260)"""
- diff = callequal(b"spam", b"eggs", verbose=True)
+ diff = callequal(b"spam", b"eggs", verbose=1)
assert diff == [
"b'spam' == b'eggs'",
"At index 0 diff: b's' != b'e'",
@@ -361,7 +365,7 @@ class TestAssert_reprcompare:
@pytest.mark.parametrize(
["left", "right", "expected"],
[
- (
+ pytest.param(
[0, 1],
[0, 2],
"""
@@ -371,8 +375,9 @@ class TestAssert_reprcompare:
+ [0, 2]
? ^
""",
+ id="lists",
),
- (
+ pytest.param(
{0: 1},
{0: 2},
"""
@@ -382,8 +387,9 @@ class TestAssert_reprcompare:
+ {0: 2}
? ^
""",
+ id="dicts",
),
- (
+ pytest.param(
{0, 1},
{0, 2},
"""
@@ -393,6 +399,7 @@ class TestAssert_reprcompare:
+ {0, 2}
? ^
""",
+ id="sets",
),
],
)
@@ -402,9 +409,9 @@ class TestAssert_reprcompare:
When verbose is False, only a -v notice to get the diff is rendered;
when verbose is True, the ndiff of the pprint is returned.
"""
- expl = callequal(left, right, verbose=False)
+ expl = callequal(left, right, verbose=0)
assert expl[-1] == "Use -v to get the full diff"
- expl = "\n".join(callequal(left, right, verbose=True))
+ expl = "\n".join(callequal(left, right, verbose=1))
assert expl.endswith(textwrap.dedent(expected).strip())
def test_list_different_lengths(self):
@@ -413,6 +420,113 @@ class TestAssert_reprcompare:
expl = callequal([0, 1, 2], [0, 1])
assert len(expl) > 1
+ def test_list_wrap_for_multiple_lines(self):
+ long_d = "d" * 80
+ l1 = ["a", "b", "c"]
+ l2 = ["a", "b", "c", long_d]
+        diff = callequal(l1, l2, verbose=1)
+ assert diff == [
+ "['a', 'b', 'c'] == ['a', 'b', 'c...dddddddddddd']",
+ "Right contains one more item: '" + long_d + "'",
+ "Full diff:",
+ " [",
+ " 'a',",
+ " 'b',",
+ " 'c',",
+ "+ '" + long_d + "',",
+ " ]",
+ ]
+
+        diff = callequal(l2, l1, verbose=1)
+ assert diff == [
+ "['a', 'b', 'c...dddddddddddd'] == ['a', 'b', 'c']",
+ "Left contains one more item: '" + long_d + "'",
+ "Full diff:",
+ " [",
+ " 'a',",
+ " 'b',",
+ " 'c',",
+ "- '" + long_d + "',",
+ " ]",
+ ]
+
+ def test_list_wrap_for_width_rewrap_same_length(self):
+ long_a = "a" * 30
+ long_b = "b" * 30
+ long_c = "c" * 30
+ l1 = [long_a, long_b, long_c]
+ l2 = [long_b, long_c, long_a]
+        diff = callequal(l1, l2, verbose=1)
+ assert diff == [
+ "['aaaaaaaaaaa...cccccccccccc'] == ['bbbbbbbbbbb...aaaaaaaaaaaa']",
+ "At index 0 diff: 'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaa' != 'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbb'",
+ "Full diff:",
+ " [",
+ "- 'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaa',",
+ " 'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbb',",
+ " 'cccccccccccccccccccccccccccccc',",
+ "+ 'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaa',",
+ " ]",
+ ]
+
+ def test_list_dont_wrap_strings(self):
+ long_a = "a" * 10
+ l1 = ["a"] + [long_a for _ in range(0, 7)]
+ l2 = ["should not get wrapped"]
+        diff = callequal(l1, l2, verbose=1)
+ assert diff == [
+ "['a', 'aaaaaa...aaaaaaa', ...] == ['should not get wrapped']",
+ "At index 0 diff: 'a' != 'should not get wrapped'",
+ "Left contains 7 more items, first extra item: 'aaaaaaaaaa'",
+ "Full diff:",
+ " [",
+ "+ 'should not get wrapped',",
+ "- 'a',",
+ "- 'aaaaaaaaaa',",
+ "- 'aaaaaaaaaa',",
+ "- 'aaaaaaaaaa',",
+ "- 'aaaaaaaaaa',",
+ "- 'aaaaaaaaaa',",
+ "- 'aaaaaaaaaa',",
+ "- 'aaaaaaaaaa',",
+ " ]",
+ ]
+
+ def test_dict_wrap(self):
+ d1 = {"common": 1, "env": {"env1": 1}}
+ d2 = {"common": 1, "env": {"env1": 1, "env2": 2}}
+
+        diff = callequal(d1, d2, verbose=1)
+ assert diff == [
+ "{'common': 1,...: {'env1': 1}} == {'common': 1,...1, 'env2': 2}}",
+ "Omitting 1 identical items, use -vv to show",
+ "Differing items:",
+ "{'env': {'env1': 1}} != {'env': {'env1': 1, 'env2': 2}}",
+ "Full diff:",
+ "- {'common': 1, 'env': {'env1': 1}}",
+ "+ {'common': 1, 'env': {'env1': 1, 'env2': 2}}",
+ "? +++++++++++",
+ ]
+
+ long_a = "a" * 80
+ sub = {"long_a": long_a, "sub1": {"long_a": "substring that gets wrapped " * 2}}
+ d1 = {"env": {"sub": sub}}
+ d2 = {"env": {"sub": sub}, "new": 1}
+        diff = callequal(d1, d2, verbose=1)
+ assert diff == [
+ "{'env': {'sub... wrapped '}}}} == {'env': {'sub...}}}, 'new': 1}",
+ "Omitting 1 identical items, use -vv to show",
+ "Right contains 1 more item:",
+ "{'new': 1}",
+ "Full diff:",
+ " {",
+ " 'env': {'sub': {'long_a': '" + long_a + "',",
+ " 'sub1': {'long_a': 'substring that gets wrapped substring '",
+ " 'that gets wrapped '}}},",
+ "+ 'new': 1,",
+ " }",
+ ]
+
def test_dict(self):
expl = callequal({"a": 0}, {"a": 1})
assert len(expl) > 1
@@ -1034,7 +1148,7 @@ def test_assertion_options(testdir):
result = testdir.runpytest()
assert "3 == 4" in result.stdout.str()
result = testdir.runpytest_subprocess("--assert=plain")
- assert "3 == 4" not in result.stdout.str()
+ result.stdout.no_fnmatch_line("*3 == 4*")
def test_triple_quoted_string_issue113(testdir):
@@ -1046,7 +1160,7 @@ def test_triple_quoted_string_issue113(testdir):
)
result = testdir.runpytest("--fulltrace")
result.stdout.fnmatch_lines(["*1 failed*"])
- assert "SyntaxError" not in result.stdout.str()
+ result.stdout.no_fnmatch_line("*SyntaxError*")
def test_traceback_failure(testdir):
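
Note: the new wrap-and-diff tests above pin the exact -v output for long sequences. A hypothetical, deliberately failing demo module (names illustrative) that reproduces the first case; running pytest -v on it should show the full, wrapped list diff instead of the truncated one-liner:

    # demo_full_diff.py -- run with: pytest -v demo_full_diff.py
    def test_full_diff_example():
        long_d = "d" * 80
        assert ["a", "b", "c"] == ["a", "b", "c", long_d]
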
diff --git a/testing/test_assertrewrite.py b/testing/test_assertrewrite.py
index 89b23a72c..8490a59e6 100644
--- a/testing/test_assertrewrite.py
+++ b/testing/test_assertrewrite.py
@@ -17,9 +17,12 @@ import pytest
from _pytest.assertion import util
from _pytest.assertion.rewrite import _get_assertion_exprs
from _pytest.assertion.rewrite import AssertionRewritingHook
+from _pytest.assertion.rewrite import get_cache_dir
+from _pytest.assertion.rewrite import PYC_TAIL
from _pytest.assertion.rewrite import PYTEST_TAG
from _pytest.assertion.rewrite import rewrite_asserts
from _pytest.main import ExitCode
+from _pytest.pathlib import Path
def setup_module(mod):
@@ -119,7 +122,7 @@ class TestAssertionRewrite:
}
testdir.makepyfile(**contents)
result = testdir.runpytest_subprocess()
- assert "warnings" not in "".join(result.outlines)
+ assert "warning" not in "".join(result.outlines)
def test_rewrites_plugin_as_a_package(self, testdir):
pkgdir = testdir.mkpydir("plugin")
@@ -190,11 +193,12 @@ class TestAssertionRewrite:
pass
msg = getmsg(f, {"cls": X}).splitlines()
- if verbose > 0:
-
+ if verbose > 1:
+ assert msg == ["assert {!r} == 42".format(X), " -{!r}".format(X), " +42"]
+ elif verbose > 0:
assert msg == [
"assert <class 'test_...e.<locals>.X'> == 42",
- " -<class 'test_assertrewrite.TestAssertionRewrite.test_name.<locals>.X'>",
+ " -{!r}".format(X),
" +42",
]
else:
@@ -206,9 +210,17 @@ class TestAssertionRewrite:
def f():
assert "1234567890" * 5 + "A" == "1234567890" * 5 + "B"
- assert getmsg(f).splitlines()[0] == (
- "assert '123456789012...901234567890A' == '123456789012...901234567890B'"
- )
+ msg = getmsg(f).splitlines()[0]
+ if request.config.getoption("verbose") > 1:
+ assert msg == (
+ "assert '12345678901234567890123456789012345678901234567890A' "
+ "== '12345678901234567890123456789012345678901234567890B'"
+ )
+ else:
+ assert msg == (
+ "assert '123456789012...901234567890A' "
+ "== '123456789012...901234567890B'"
+ )
def test_dont_rewrite_if_hasattr_fails(self, request):
class Y:
@@ -914,7 +926,7 @@ def test_rewritten():
testdir.chdir()
result = testdir.runpytest_subprocess()
result.stdout.fnmatch_lines(["*= 1 passed in *=*"])
- assert "pytest-warning summary" not in result.stdout.str()
+ result.stdout.no_fnmatch_line("*pytest-warning summary*")
def test_rewrite_warning_using_pytest_plugins_env_var(self, testdir, monkeypatch):
monkeypatch.setenv("PYTEST_PLUGINS", "plugin")
@@ -932,7 +944,7 @@ def test_rewritten():
testdir.chdir()
result = testdir.runpytest_subprocess()
result.stdout.fnmatch_lines(["*= 1 passed in *=*"])
- assert "pytest-warning summary" not in result.stdout.str()
+ result.stdout.no_fnmatch_line("*pytest-warning summary*")
class TestAssertionRewriteHookDetails:
@@ -947,24 +959,34 @@ class TestAssertionRewriteHookDetails:
def test_write_pyc(self, testdir, tmpdir, monkeypatch):
from _pytest.assertion.rewrite import _write_pyc
from _pytest.assertion import AssertionState
- import atomicwrites
- from contextlib import contextmanager
config = testdir.parseconfig([])
state = AssertionState(config, "rewrite")
- source_path = tmpdir.ensure("source.py")
+ source_path = str(tmpdir.ensure("source.py"))
pycpath = tmpdir.join("pyc").strpath
- assert _write_pyc(state, [1], os.stat(source_path.strpath), pycpath)
+ assert _write_pyc(state, [1], os.stat(source_path), pycpath)
- @contextmanager
- def atomic_write_failed(fn, mode="r", overwrite=False):
- e = IOError()
- e.errno = 10
- raise e
- yield
+ if sys.platform == "win32":
+ from contextlib import contextmanager
- monkeypatch.setattr(atomicwrites, "atomic_write", atomic_write_failed)
- assert not _write_pyc(state, [1], source_path.stat(), pycpath)
+ @contextmanager
+ def atomic_write_failed(fn, mode="r", overwrite=False):
+ e = IOError()
+ e.errno = 10
+ raise e
+ yield
+
+ monkeypatch.setattr(
+ _pytest.assertion.rewrite, "atomic_write", atomic_write_failed
+ )
+ else:
+
+ def raise_ioerror(*args):
+ raise IOError()
+
+ monkeypatch.setattr("os.rename", raise_ioerror)
+
+ assert not _write_pyc(state, [1], os.stat(source_path), pycpath)
def test_resources_provider_for_loader(self, testdir):
"""
@@ -1124,7 +1146,7 @@ def test_issue731(testdir):
"""
)
result = testdir.runpytest()
- assert "unbalanced braces" not in result.stdout.str()
+ result.stdout.no_fnmatch_line("*unbalanced braces*")
class TestIssue925:
@@ -1542,41 +1564,97 @@ def test_get_assertion_exprs(src, expected):
assert _get_assertion_exprs(src) == expected
-def test_try_mkdir(monkeypatch, tmp_path):
- from _pytest.assertion.rewrite import try_mkdir
+def test_try_makedirs(monkeypatch, tmp_path):
+ from _pytest.assertion.rewrite import try_makedirs
p = tmp_path / "foo"
# create
- assert try_mkdir(str(p))
+ assert try_makedirs(str(p))
assert p.is_dir()
# already exist
- assert try_mkdir(str(p))
+ assert try_makedirs(str(p))
# monkeypatch to simulate all error situations
- def fake_mkdir(p, *, exc):
+ def fake_mkdir(p, exist_ok=False, *, exc):
assert isinstance(p, str)
raise exc
- monkeypatch.setattr(os, "mkdir", partial(fake_mkdir, exc=FileNotFoundError()))
- assert not try_mkdir(str(p))
+ monkeypatch.setattr(os, "makedirs", partial(fake_mkdir, exc=FileNotFoundError()))
+ assert not try_makedirs(str(p))
- monkeypatch.setattr(os, "mkdir", partial(fake_mkdir, exc=NotADirectoryError()))
- assert not try_mkdir(str(p))
+ monkeypatch.setattr(os, "makedirs", partial(fake_mkdir, exc=NotADirectoryError()))
+ assert not try_makedirs(str(p))
- monkeypatch.setattr(os, "mkdir", partial(fake_mkdir, exc=PermissionError()))
- assert not try_mkdir(str(p))
+ monkeypatch.setattr(os, "makedirs", partial(fake_mkdir, exc=PermissionError()))
+ assert not try_makedirs(str(p))
err = OSError()
err.errno = errno.EROFS
- monkeypatch.setattr(os, "mkdir", partial(fake_mkdir, exc=err))
- assert not try_mkdir(str(p))
+ monkeypatch.setattr(os, "makedirs", partial(fake_mkdir, exc=err))
+ assert not try_makedirs(str(p))
# unhandled OSError should raise
err = OSError()
err.errno = errno.ECHILD
- monkeypatch.setattr(os, "mkdir", partial(fake_mkdir, exc=err))
+ monkeypatch.setattr(os, "makedirs", partial(fake_mkdir, exc=err))
with pytest.raises(OSError) as exc_info:
- try_mkdir(str(p))
+ try_makedirs(str(p))
assert exc_info.value.errno == errno.ECHILD
+
+
+class TestPyCacheDir:
+ @pytest.mark.parametrize(
+ "prefix, source, expected",
+ [
+ ("c:/tmp/pycs", "d:/projects/src/foo.py", "c:/tmp/pycs/projects/src"),
+ (None, "d:/projects/src/foo.py", "d:/projects/src/__pycache__"),
+ ("/tmp/pycs", "/home/projects/src/foo.py", "/tmp/pycs/home/projects/src"),
+ (None, "/home/projects/src/foo.py", "/home/projects/src/__pycache__"),
+ ],
+ )
+ def test_get_cache_dir(self, monkeypatch, prefix, source, expected):
+ if prefix:
+ if sys.version_info < (3, 8):
+ pytest.skip("pycache_prefix not available in py<38")
+ monkeypatch.setattr(sys, "pycache_prefix", prefix)
+
+ assert get_cache_dir(Path(source)) == Path(expected)
+
+ @pytest.mark.skipif(
+ sys.version_info < (3, 8), reason="pycache_prefix not available in py<38"
+ )
+ def test_sys_pycache_prefix_integration(self, tmp_path, monkeypatch, testdir):
+ """Integration test for sys.pycache_prefix (#4730)."""
+ pycache_prefix = tmp_path / "my/pycs"
+ monkeypatch.setattr(sys, "pycache_prefix", str(pycache_prefix))
+ monkeypatch.setattr(sys, "dont_write_bytecode", False)
+
+ testdir.makepyfile(
+ **{
+ "src/test_foo.py": """
+ import bar
+ def test_foo():
+ pass
+ """,
+ "src/bar/__init__.py": "",
+ }
+ )
+ result = testdir.runpytest()
+ assert result.ret == 0
+
+ test_foo = Path(testdir.tmpdir) / "src/test_foo.py"
+ bar_init = Path(testdir.tmpdir) / "src/bar/__init__.py"
+ assert test_foo.is_file()
+ assert bar_init.is_file()
+
+ # test file: rewritten, custom pytest cache tag
+ test_foo_pyc = get_cache_dir(test_foo) / ("test_foo" + PYC_TAIL)
+ assert test_foo_pyc.is_file()
+
+ # normal file: not touched by pytest, normal cache tag
+ bar_init_pyc = get_cache_dir(bar_init) / "__init__.{cache_tag}.pyc".format(
+ cache_tag=sys.implementation.cache_tag
+ )
+ assert bar_init_pyc.is_file()
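
Note: a sketch of the directory mapping TestPyCacheDir exercises, consistent with the parametrized cases above. With sys.pycache_prefix set (Python 3.8+), the pyc tree mirrors the source tree under the prefix with the drive/root anchor stripped; otherwise the classic __pycache__ sibling is used.

    import sys
    from pathlib import Path

    def get_cache_dir(file_path: Path) -> Path:
        if getattr(sys, "pycache_prefix", None):
            # e.g. /tmp/pycs + /home/projects/src/foo.py
            #   -> /tmp/pycs/home/projects/src
            return Path(sys.pycache_prefix) / Path(*file_path.parts[1:-1])
        # classic behaviour: __pycache__ next to the source file
        return file_path.parent / "__pycache__"
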
diff --git a/testing/test_cacheprovider.py b/testing/test_cacheprovider.py
index 4fe10c6d4..0e1194b02 100644
--- a/testing/test_cacheprovider.py
+++ b/testing/test_cacheprovider.py
@@ -1,7 +1,7 @@
import os
import shutil
+import stat
import sys
-import textwrap
import py
@@ -45,26 +45,35 @@ class TestNewAPI:
)
def test_cache_writefail_permissions(self, testdir):
testdir.makeini("[pytest]")
+ cache_dir = str(testdir.tmpdir.ensure_dir(".pytest_cache"))
+ mode = os.stat(cache_dir)[stat.ST_MODE]
testdir.tmpdir.ensure_dir(".pytest_cache").chmod(0)
- config = testdir.parseconfigure()
- cache = config.cache
- cache.set("test/broken", [])
+ try:
+ config = testdir.parseconfigure()
+ cache = config.cache
+ cache.set("test/broken", [])
+ finally:
+ testdir.tmpdir.ensure_dir(".pytest_cache").chmod(mode)
@pytest.mark.skipif(sys.platform.startswith("win"), reason="no chmod on windows")
- @pytest.mark.filterwarnings("default")
- def test_cache_failure_warns(self, testdir):
+ @pytest.mark.filterwarnings(
+ "ignore:could not create cache path:pytest.PytestWarning"
+ )
+ def test_cache_failure_warns(self, testdir, monkeypatch):
+ monkeypatch.setenv("PYTEST_DISABLE_PLUGIN_AUTOLOAD", "1")
+ cache_dir = str(testdir.tmpdir.ensure_dir(".pytest_cache"))
+ mode = os.stat(cache_dir)[stat.ST_MODE]
testdir.tmpdir.ensure_dir(".pytest_cache").chmod(0)
- testdir.makepyfile(
- """
- def test_error():
- raise Exception
-
- """
- )
- result = testdir.runpytest("-rw")
- assert result.ret == 1
- # warnings from nodeids, lastfailed, and stepwise
- result.stdout.fnmatch_lines(["*could not create cache path*", "*3 warnings*"])
+ try:
+ testdir.makepyfile("def test_error(): raise Exception")
+ result = testdir.runpytest("-rw")
+ assert result.ret == 1
+ # warnings from nodeids, lastfailed, and stepwise
+ result.stdout.fnmatch_lines(
+ ["*could not create cache path*", "*3 warnings*"]
+ )
+ finally:
+ testdir.tmpdir.ensure_dir(".pytest_cache").chmod(mode)
def test_config_cache(self, testdir):
testdir.makeconftest(
@@ -163,12 +172,7 @@ def test_cache_reportheader_external_abspath(testdir, tmpdir_factory):
"test_cache_reportheader_external_abspath_abs"
)
- testdir.makepyfile(
- """
- def test_hello():
- pass
- """
- )
+ testdir.makepyfile("def test_hello(): pass")
testdir.makeini(
"""
[pytest]
@@ -177,7 +181,6 @@ def test_cache_reportheader_external_abspath(testdir, tmpdir_factory):
abscache=external_cache
)
)
-
result = testdir.runpytest("-v")
result.stdout.fnmatch_lines(
["cachedir: {abscache}".format(abscache=external_cache)]
@@ -238,36 +241,26 @@ def test_cache_show(testdir):
class TestLastFailed:
def test_lastfailed_usecase(self, testdir, monkeypatch):
- monkeypatch.setenv("PYTHONDONTWRITEBYTECODE", "1")
+ monkeypatch.setattr("sys.dont_write_bytecode", True)
p = testdir.makepyfile(
"""
- def test_1():
- assert 0
- def test_2():
- assert 0
- def test_3():
- assert 1
- """
+ def test_1(): assert 0
+ def test_2(): assert 0
+ def test_3(): assert 1
+ """
)
- result = testdir.runpytest()
+ result = testdir.runpytest(str(p))
result.stdout.fnmatch_lines(["*2 failed*"])
- p.write(
- textwrap.dedent(
- """\
- def test_1():
- assert 1
-
- def test_2():
- assert 1
-
- def test_3():
- assert 0
- """
- )
+ p = testdir.makepyfile(
+ """
+ def test_1(): assert 1
+ def test_2(): assert 1
+ def test_3(): assert 0
+ """
)
- result = testdir.runpytest("--lf")
+ result = testdir.runpytest(str(p), "--lf")
result.stdout.fnmatch_lines(["*2 passed*1 desel*"])
- result = testdir.runpytest("--lf")
+ result = testdir.runpytest(str(p), "--lf")
result.stdout.fnmatch_lines(
[
"collected 3 items",
@@ -275,7 +268,7 @@ class TestLastFailed:
"*1 failed*2 passed*",
]
)
- result = testdir.runpytest("--lf", "--cache-clear")
+ result = testdir.runpytest(str(p), "--lf", "--cache-clear")
result.stdout.fnmatch_lines(["*1 failed*2 passed*"])
# Run this again to make sure clear-cache is robust
@@ -285,21 +278,9 @@ class TestLastFailed:
result.stdout.fnmatch_lines(["*1 failed*2 passed*"])
def test_failedfirst_order(self, testdir):
- testdir.tmpdir.join("test_a.py").write(
- textwrap.dedent(
- """\
- def test_always_passes():
- assert 1
- """
- )
- )
- testdir.tmpdir.join("test_b.py").write(
- textwrap.dedent(
- """\
- def test_always_fails():
- assert 0
- """
- )
+ testdir.makepyfile(
+ test_a="def test_always_passes(): pass",
+ test_b="def test_always_fails(): assert 0",
)
result = testdir.runpytest()
# Test order will be collection order; alphabetical
@@ -310,16 +291,8 @@ class TestLastFailed:
def test_lastfailed_failedfirst_order(self, testdir):
testdir.makepyfile(
- **{
- "test_a.py": """\
- def test_always_passes():
- assert 1
- """,
- "test_b.py": """\
- def test_always_fails():
- assert 0
- """,
- }
+ test_a="def test_always_passes(): assert 1",
+ test_b="def test_always_fails(): assert 0",
)
result = testdir.runpytest()
# Test order will be collection order; alphabetical
@@ -327,21 +300,16 @@ class TestLastFailed:
result = testdir.runpytest("--lf", "--ff")
        # Test order will be failing tests first
result.stdout.fnmatch_lines(["test_b.py*"])
- assert "test_a.py" not in result.stdout.str()
+ result.stdout.no_fnmatch_line("*test_a.py*")
def test_lastfailed_difference_invocations(self, testdir, monkeypatch):
- monkeypatch.setenv("PYTHONDONTWRITEBYTECODE", "1")
+ monkeypatch.setattr("sys.dont_write_bytecode", True)
testdir.makepyfile(
- test_a="""\
- def test_a1():
- assert 0
- def test_a2():
- assert 1
- """,
- test_b="""\
- def test_b1():
- assert 0
+ test_a="""
+ def test_a1(): assert 0
+ def test_a2(): assert 1
""",
+ test_b="def test_b1(): assert 0",
)
p = testdir.tmpdir.join("test_a.py")
p2 = testdir.tmpdir.join("test_b.py")
@@ -350,36 +318,19 @@ class TestLastFailed:
result.stdout.fnmatch_lines(["*2 failed*"])
result = testdir.runpytest("--lf", p2)
result.stdout.fnmatch_lines(["*1 failed*"])
- p2.write(
- textwrap.dedent(
- """\
- def test_b1():
- assert 1
- """
- )
- )
+
+ testdir.makepyfile(test_b="def test_b1(): assert 1")
result = testdir.runpytest("--lf", p2)
result.stdout.fnmatch_lines(["*1 passed*"])
result = testdir.runpytest("--lf", p)
result.stdout.fnmatch_lines(["*1 failed*1 desel*"])
def test_lastfailed_usecase_splice(self, testdir, monkeypatch):
- monkeypatch.setenv("PYTHONDONTWRITEBYTECODE", "1")
+ monkeypatch.setattr("sys.dont_write_bytecode", True)
testdir.makepyfile(
- """\
- def test_1():
- assert 0
- """
+ "def test_1(): assert 0", test_something="def test_2(): assert 0"
)
p2 = testdir.tmpdir.join("test_something.py")
- p2.write(
- textwrap.dedent(
- """\
- def test_2():
- assert 0
- """
- )
- )
result = testdir.runpytest()
result.stdout.fnmatch_lines(["*2 failed*"])
result = testdir.runpytest("--lf", p2)
@@ -421,18 +372,14 @@ class TestLastFailed:
def test_terminal_report_lastfailed(self, testdir):
test_a = testdir.makepyfile(
test_a="""
- def test_a1():
- pass
- def test_a2():
- pass
+ def test_a1(): pass
+ def test_a2(): pass
"""
)
test_b = testdir.makepyfile(
test_b="""
- def test_b1():
- assert 0
- def test_b2():
- assert 0
+ def test_b1(): assert 0
+ def test_b2(): assert 0
"""
)
result = testdir.runpytest()
@@ -477,10 +424,8 @@ class TestLastFailed:
def test_terminal_report_failedfirst(self, testdir):
testdir.makepyfile(
test_a="""
- def test_a1():
- assert 0
- def test_a2():
- pass
+ def test_a1(): assert 0
+ def test_a2(): pass
"""
)
result = testdir.runpytest()
@@ -527,7 +472,6 @@ class TestLastFailed:
assert list(lastfailed) == ["test_maybe.py::test_hello"]
def test_lastfailed_failure_subset(self, testdir, monkeypatch):
-
testdir.makepyfile(
test_maybe="""
import os
@@ -545,6 +489,7 @@ class TestLastFailed:
env = os.environ
if '1' == env['FAILIMPORT']:
raise ImportError('fail')
+
def test_hello():
assert '0' == env['FAILTEST']
@@ -598,8 +543,7 @@ class TestLastFailed:
"""
import pytest
@pytest.mark.xfail
- def test():
- assert 0
+ def test(): assert 0
"""
)
result = testdir.runpytest()
@@ -611,8 +555,7 @@ class TestLastFailed:
"""
import pytest
@pytest.mark.xfail(strict=True)
- def test():
- pass
+ def test(): pass
"""
)
result = testdir.runpytest()
@@ -626,8 +569,7 @@ class TestLastFailed:
testdir.makepyfile(
"""
import pytest
- def test():
- assert 0
+ def test(): assert 0
"""
)
result = testdir.runpytest()
@@ -640,8 +582,7 @@ class TestLastFailed:
"""
import pytest
@pytest.{mark}
- def test():
- assert 0
+ def test(): assert 0
""".format(
mark=mark
)
@@ -660,11 +601,11 @@ class TestLastFailed:
if quiet:
args.append("-q")
result = testdir.runpytest(*args)
- assert "run all" not in result.stdout.str()
+ result.stdout.no_fnmatch_line("*run all*")
result = testdir.runpytest(*args)
if quiet:
- assert "run all" not in result.stdout.str()
+ result.stdout.no_fnmatch_line("*run all*")
else:
assert "rerun previous" in result.stdout.str()
@@ -679,18 +620,14 @@ class TestLastFailed:
# 1. initial run
test_bar = testdir.makepyfile(
test_bar="""
- def test_bar_1():
- pass
- def test_bar_2():
- assert 0
+ def test_bar_1(): pass
+ def test_bar_2(): assert 0
"""
)
test_foo = testdir.makepyfile(
test_foo="""
- def test_foo_3():
- pass
- def test_foo_4():
- assert 0
+ def test_foo_3(): pass
+ def test_foo_4(): assert 0
"""
)
testdir.runpytest()
@@ -702,10 +639,8 @@ class TestLastFailed:
# 2. fix test_bar_2, run only test_bar.py
testdir.makepyfile(
test_bar="""
- def test_bar_1():
- pass
- def test_bar_2():
- pass
+ def test_bar_1(): pass
+ def test_bar_2(): pass
"""
)
result = testdir.runpytest(test_bar)
@@ -720,10 +655,8 @@ class TestLastFailed:
# 3. fix test_foo_4, run only test_foo.py
test_foo = testdir.makepyfile(
test_foo="""
- def test_foo_3():
- pass
- def test_foo_4():
- pass
+ def test_foo_3(): pass
+ def test_foo_4(): pass
"""
)
result = testdir.runpytest(test_foo, "--last-failed")
@@ -737,10 +670,8 @@ class TestLastFailed:
def test_lastfailed_no_failures_behavior_all_passed(self, testdir):
testdir.makepyfile(
"""
- def test_1():
- assert True
- def test_2():
- assert True
+ def test_1(): pass
+ def test_2(): pass
"""
)
result = testdir.runpytest()
@@ -762,10 +693,8 @@ class TestLastFailed:
def test_lastfailed_no_failures_behavior_empty_cache(self, testdir):
testdir.makepyfile(
"""
- def test_1():
- assert True
- def test_2():
- assert False
+ def test_1(): pass
+ def test_2(): assert 0
"""
)
result = testdir.runpytest("--lf", "--cache-clear")
@@ -1007,22 +936,12 @@ class TestReadme:
return readme.is_file()
def test_readme_passed(self, testdir):
- testdir.makepyfile(
- """
- def test_always_passes():
- assert 1
- """
- )
+ testdir.makepyfile("def test_always_passes(): pass")
testdir.runpytest()
assert self.check_readme(testdir) is True
def test_readme_failed(self, testdir):
- testdir.makepyfile(
- """
- def test_always_fails():
- assert 0
- """
- )
+ testdir.makepyfile("def test_always_fails(): assert 0")
testdir.runpytest()
assert self.check_readme(testdir) is True
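
Note: the chmod fixes above follow one pattern: capture the cache dir's mode up front and restore it in a finally block so the tmp dir can always be cleaned up, even when the assertions fail. As a reusable sketch (name assumed):

    import os
    import stat
    from contextlib import contextmanager

    @contextmanager
    def unwritable(path):
        mode = os.stat(path)[stat.ST_MODE]
        os.chmod(path, 0)
        try:
            yield
        finally:
            # always restore, even if the body raised
            os.chmod(path, mode)
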
diff --git a/testing/test_capture.py b/testing/test_capture.py
index 139655c72..94af3aef7 100644
--- a/testing/test_capture.py
+++ b/testing/test_capture.py
@@ -7,6 +7,8 @@ import sys
import textwrap
from io import StringIO
from io import UnsupportedOperation
+from typing import List
+from typing import TextIO
import pytest
from _pytest import capture
@@ -90,8 +92,6 @@ class TestCaptureManager:
@pytest.mark.parametrize("method", ["fd", "sys"])
def test_capturing_unicode(testdir, method):
- if hasattr(sys, "pypy_version_info") and sys.pypy_version_info < (2, 2):
- pytest.xfail("does not work on pypy < 2.2")
obj = "'b\u00f6y'"
testdir.makepyfile(
"""\
@@ -451,7 +451,7 @@ class TestCaptureFixture:
"E*capfd*capsys*same*time*",
"*ERROR*setup*test_two*",
"E*capsys*capfd*same*time*",
- "*2 error*",
+ "*2 errors*",
]
)
@@ -603,17 +603,13 @@ class TestCaptureFixture:
)
args = ("-s",) if no_capture else ()
result = testdir.runpytest_subprocess(*args)
- result.stdout.fnmatch_lines(
- """
- *while capture is disabled*
- """
- )
- assert "captured before" not in result.stdout.str()
- assert "captured after" not in result.stdout.str()
+ result.stdout.fnmatch_lines(["*while capture is disabled*", "*= 2 passed in *"])
+ result.stdout.no_fnmatch_line("*captured before*")
+ result.stdout.no_fnmatch_line("*captured after*")
if no_capture:
assert "test_normal executed" in result.stdout.str()
else:
- assert "test_normal executed" not in result.stdout.str()
+ result.stdout.no_fnmatch_line("*test_normal executed*")
@pytest.mark.parametrize("fixture", ["capsys", "capfd"])
def test_fixture_use_by_other_fixtures(self, testdir, fixture):
@@ -649,8 +645,8 @@ class TestCaptureFixture:
)
result = testdir.runpytest_subprocess()
result.stdout.fnmatch_lines(["*1 passed*"])
- assert "stdout contents begin" not in result.stdout.str()
- assert "stderr contents begin" not in result.stdout.str()
+ result.stdout.no_fnmatch_line("*stdout contents begin*")
+ result.stdout.no_fnmatch_line("*stderr contents begin*")
@pytest.mark.parametrize("cap", ["capsys", "capfd"])
def test_fixture_use_by_other_fixtures_teardown(self, testdir, cap):
@@ -720,7 +716,7 @@ def test_capture_conftest_runtest_setup(testdir):
testdir.makepyfile("def test_func(): pass")
result = testdir.runpytest()
assert result.ret == 0
- assert "hello19" not in result.stdout.str()
+ result.stdout.no_fnmatch_line("*hello19*")
def test_capture_badoutput_issue412(testdir):
@@ -824,6 +820,7 @@ def test_dontreadfrominput():
from _pytest.capture import DontReadFromInput
f = DontReadFromInput()
+ assert f.buffer is f
assert not f.isatty()
pytest.raises(IOError, f.read)
pytest.raises(IOError, f.readlines)
@@ -833,20 +830,6 @@ def test_dontreadfrominput():
f.close() # just for completeness
-def test_dontreadfrominput_buffer_python3():
- from _pytest.capture import DontReadFromInput
-
- f = DontReadFromInput()
- fb = f.buffer
- assert not fb.isatty()
- pytest.raises(IOError, fb.read)
- pytest.raises(IOError, fb.readlines)
- iter_f = iter(f)
- pytest.raises(IOError, next, iter_f)
- pytest.raises(ValueError, fb.fileno)
- f.close() # just for completeness
-
-
@pytest.fixture
def tmpfile(testdir):
f = testdir.makepyfile("").open("wb+")
@@ -856,8 +839,8 @@ def tmpfile(testdir):
@needsosdup
-def test_dupfile(tmpfile):
- flist = []
+def test_dupfile(tmpfile) -> None:
+ flist = [] # type: List[TextIO]
for i in range(5):
nf = capture.safe_text_dupfile(tmpfile, "wb")
assert nf != tmpfile
@@ -903,9 +886,9 @@ def lsof_check():
pid = os.getpid()
try:
out = subprocess.check_output(("lsof", "-p", str(pid))).decode()
- except (OSError, subprocess.CalledProcessError, UnicodeDecodeError):
+ except (OSError, subprocess.CalledProcessError, UnicodeDecodeError) as exc:
# about UnicodeDecodeError, see note on pytester
- pytest.skip("could not run 'lsof'")
+ pytest.skip("could not run 'lsof' ({!r})".format(exc))
yield
out2 = subprocess.check_output(("lsof", "-p", str(pid))).decode()
len1 = len([x for x in out.split("\n") if "REG" in x])
@@ -1387,7 +1370,7 @@ def test_crash_on_closing_tmpfile_py27(testdir):
result = testdir.runpytest_subprocess(str(p))
assert result.ret == 0
assert result.stderr.str() == ""
- assert "IOError" not in result.stdout.str()
+ result.stdout.no_fnmatch_line("*IOError*")
def test_pickling_and_unpickling_encoded_file():
@@ -1501,11 +1484,9 @@ def test_typeerror_encodedfile_write(testdir):
"""
)
result_without_capture = testdir.runpytest("-s", str(p))
-
result_with_capture = testdir.runpytest(str(p))
assert result_with_capture.ret == result_without_capture.ret
-
result_with_capture.stdout.fnmatch_lines(
- ["E TypeError: write() argument must be str, not bytes"]
+ ["E * TypeError: write() argument must be str, not bytes"]
)
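
Note: the single added assertion f.buffer is f is what allows the separate buffer test to be deleted: the stdin stub now serves as its own binary buffer, so one test covers both the text and bytes interfaces. A sketch of the relevant part of the stub (error message assumed from the capture module):

    class DontReadFromInput:
        # Stub that replaces sys.stdin while output is captured.
        def read(self, *args):
            raise IOError("reading from stdin while output is captured")

        readline = readlines = __next__ = read

        def __iter__(self):
            return self

        @property
        def buffer(self):
            # the text and bytes interfaces are the same object
            return self
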
diff --git a/testing/test_collection.py b/testing/test_collection.py
index fe5d66e94..b791ac6f9 100644
--- a/testing/test_collection.py
+++ b/testing/test_collection.py
@@ -139,7 +139,7 @@ class TestCollectFS:
# by default, ignore tests inside a virtualenv
result = testdir.runpytest()
- assert "test_invenv" not in result.stdout.str()
+ result.stdout.no_fnmatch_line("*test_invenv*")
# allow test collection if user insists
result = testdir.runpytest("--collect-in-virtualenv")
assert "test_invenv" in result.stdout.str()
@@ -165,7 +165,7 @@ class TestCollectFS:
testfile = testdir.tmpdir.ensure(".virtual", "test_invenv.py")
testfile.write("def test_hello(): pass")
result = testdir.runpytest("--collect-in-virtualenv")
- assert "test_invenv" not in result.stdout.str()
+ result.stdout.no_fnmatch_line("*test_invenv*")
# ...unless the virtualenv is explicitly given on the CLI
result = testdir.runpytest("--collect-in-virtualenv", ".virtual")
assert "test_invenv" in result.stdout.str()
@@ -364,7 +364,7 @@ class TestCustomConftests:
testdir.makepyfile(test_world="def test_hello(): pass")
result = testdir.runpytest()
assert result.ret == ExitCode.NO_TESTS_COLLECTED
- assert "passed" not in result.stdout.str()
+ result.stdout.no_fnmatch_line("*passed*")
result = testdir.runpytest("--XX")
assert result.ret == 0
assert "passed" in result.stdout.str()
@@ -402,7 +402,7 @@ class TestCustomConftests:
)
testdir.mkdir("sub")
testdir.makepyfile("def test_x(): pass")
- result = testdir.runpytest("--collect-only")
+ result = testdir.runpytest("--co")
result.stdout.fnmatch_lines(["*MyModule*", "*test_x*"])
def test_pytest_collect_file_from_sister_dir(self, testdir):
@@ -433,7 +433,7 @@ class TestCustomConftests:
p = testdir.makepyfile("def test_x(): pass")
p.copy(sub1.join(p.basename))
p.copy(sub2.join(p.basename))
- result = testdir.runpytest("--collect-only")
+ result = testdir.runpytest("--co")
result.stdout.fnmatch_lines(["*MyModule1*", "*MyModule2*", "*test_x*"])
@@ -486,7 +486,7 @@ class TestSession:
p = testdir.makepyfile("def test_func(): pass")
id = "::".join([p.basename, "test_func"])
items, hookrec = testdir.inline_genitems(id)
- item, = items
+ (item,) = items
assert item.name == "test_func"
newid = item.nodeid
assert newid == id
@@ -605,9 +605,9 @@ class TestSession:
testdir.makepyfile("def test_func(): pass")
items, hookrec = testdir.inline_genitems()
assert len(items) == 1
- item, = items
+ (item,) = items
items2, hookrec = testdir.inline_genitems(item.nodeid)
- item2, = items2
+ (item2,) = items2
assert item2.name == item.name
assert item2.fspath == item.fspath
@@ -622,7 +622,7 @@ class TestSession:
arg = p.basename + "::TestClass::test_method"
items, hookrec = testdir.inline_genitems(arg)
assert len(items) == 1
- item, = items
+ (item,) = items
assert item.nodeid.endswith("TestClass::test_method")
# ensure we are reporting the collection of the single test item (#2464)
assert [x.name for x in self.get_reported_items(hookrec)] == ["test_method"]
@@ -859,12 +859,16 @@ def test_exit_on_collection_with_maxfail_smaller_than_n_errors(testdir):
res = testdir.runpytest("--maxfail=1")
assert res.ret == 1
-
res.stdout.fnmatch_lines(
- ["*ERROR collecting test_02_import_error.py*", "*No module named *asdfa*"]
+ [
+ "collected 1 item / 1 error",
+ "*ERROR collecting test_02_import_error.py*",
+ "*No module named *asdfa*",
+ "*! stopping after 1 failures !*",
+ "*= 1 error in *",
+ ]
)
-
- assert "test_03" not in res.stdout.str()
+ res.stdout.no_fnmatch_line("*test_03*")
def test_exit_on_collection_with_maxfail_bigger_than_n_errors(testdir):
@@ -876,7 +880,6 @@ def test_exit_on_collection_with_maxfail_bigger_than_n_errors(testdir):
res = testdir.runpytest("--maxfail=4")
assert res.ret == 2
-
res.stdout.fnmatch_lines(
[
"collected 2 items / 2 errors",
@@ -884,6 +887,8 @@ def test_exit_on_collection_with_maxfail_bigger_than_n_errors(testdir):
"*No module named *asdfa*",
"*ERROR collecting test_03_import_error.py*",
"*No module named *asdfa*",
+ "*! Interrupted: 2 errors during collection !*",
+ "*= 2 errors in *",
]
)
@@ -899,7 +904,7 @@ def test_continue_on_collection_errors(testdir):
assert res.ret == 1
res.stdout.fnmatch_lines(
- ["collected 2 items / 2 errors", "*1 failed, 1 passed, 2 error*"]
+ ["collected 2 items / 2 errors", "*1 failed, 1 passed, 2 errors*"]
)
@@ -916,7 +921,7 @@ def test_continue_on_collection_errors_maxfail(testdir):
res = testdir.runpytest("--continue-on-collection-errors", "--maxfail=3")
assert res.ret == 1
- res.stdout.fnmatch_lines(["collected 2 items / 2 errors", "*1 failed, 2 error*"])
+ res.stdout.fnmatch_lines(["collected 2 items / 2 errors", "*1 failed, 2 errors*"])
def test_fixture_scope_sibling_conftests(testdir):
@@ -1003,12 +1008,12 @@ def test_collect_init_tests(testdir):
result.stdout.fnmatch_lines(
["<Package */tests>", " <Module test_foo.py>", " <Function test_foo>"]
)
- assert "test_init" not in result.stdout.str()
+ result.stdout.no_fnmatch_line("*test_init*")
result = testdir.runpytest("./tests/__init__.py", "--collect-only")
result.stdout.fnmatch_lines(
["<Package */tests>", " <Module __init__.py>", " <Function test_init>"]
)
- assert "test_foo" not in result.stdout.str()
+ result.stdout.no_fnmatch_line("*test_foo*")
def test_collect_invalid_signature_message(testdir):
@@ -1260,7 +1265,7 @@ def test_collector_respects_tbstyle(testdir):
' File "*/test_collector_respects_tbstyle.py", line 1, in <module>',
" assert 0",
"AssertionError: assert 0",
- "*! Interrupted: 1 errors during collection !*",
+ "*! Interrupted: 1 error during collection !*",
"*= 1 error in *",
]
)
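
Note: several expected lines above change between "1 error" and "2 errors": the summary wording is now number-aware. The pattern, as a sketch (helper name assumed):

    def count_with_noun(count, noun):
        return "{} {}{}".format(count, noun, "" if count == 1 else "s")

    assert count_with_noun(1, "error") == "1 error"
    assert count_with_noun(2, "error") == "2 errors"
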
diff --git a/testing/test_compat.py b/testing/test_compat.py
index 94dac439d..04d818b4e 100644
--- a/testing/test_compat.py
+++ b/testing/test_compat.py
@@ -4,6 +4,7 @@ from functools import wraps
import pytest
from _pytest.compat import _PytestWrapper
+from _pytest.compat import cached_property
from _pytest.compat import get_real_func
from _pytest.compat import is_generator
from _pytest.compat import safe_getattr
@@ -178,3 +179,23 @@ def test_safe_isclass():
assert False, "Should be ignored"
assert safe_isclass(CrappyClass()) is False
+
+
+def test_cached_property() -> None:
+ ncalls = 0
+
+ class Class:
+ @cached_property
+ def prop(self) -> int:
+ nonlocal ncalls
+ ncalls += 1
+ return ncalls
+
+ c1 = Class()
+ assert ncalls == 0
+ assert c1.prop == 1
+ assert c1.prop == 1
+ c2 = Class()
+ assert ncalls == 1
+ assert c2.prop == 2
+ assert c1.prop == 1
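
Note: a minimal sketch of the cached_property semantics test_cached_property pins down: the getter runs once per instance, after which the computed value shadows the descriptor in the instance's __dict__ (which is why ncalls only advances on first access per object).

    class cached_property:
        def __init__(self, func):
            self.func = func
            self.name = func.__name__

        def __get__(self, instance, owner=None):
            if instance is None:
                return self
            # compute once, then cache on the instance; later lookups
            # hit __dict__ and never reach this descriptor again
            value = instance.__dict__[self.name] = self.func(instance)
            return value
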
diff --git a/testing/test_config.py b/testing/test_config.py
index 71dae5c4c..f146b52a4 100644
--- a/testing/test_config.py
+++ b/testing/test_config.py
@@ -1,17 +1,18 @@
import os
import sys
import textwrap
-from pathlib import Path
import _pytest._code
import pytest
from _pytest.compat import importlib_metadata
from _pytest.config import _iter_rewritable_modules
+from _pytest.config import Config
from _pytest.config.exceptions import UsageError
from _pytest.config.findpaths import determine_setup
from _pytest.config.findpaths import get_common_ancestor
from _pytest.config.findpaths import getcfg
from _pytest.main import ExitCode
+from _pytest.pathlib import Path
class TestParseIni:
@@ -456,7 +457,7 @@ class TestConfigFromdictargs:
config = Config.fromdictargs(option_dict, args)
assert config.args == ["a", "b"]
- assert config.invocation_params.args == args
+ assert config.invocation_params.args == tuple(args)
assert config.option.verbose == 4
assert config.option.capture == "no"
@@ -1235,7 +1236,7 @@ def test_invocation_args(testdir):
call = calls[0]
config = call.item.config
- assert config.invocation_params.args == [p, "-v"]
+ assert config.invocation_params.args == (p, "-v")
assert config.invocation_params.dir == Path(str(testdir.tmpdir))
plugins = config.invocation_params.plugins
@@ -1243,6 +1244,10 @@ def test_invocation_args(testdir):
assert plugins[0] is plugin
assert type(plugins[1]).__name__ == "Collect" # installed by testdir.inline_run()
+ # args cannot be None
+ with pytest.raises(TypeError):
+ Config.InvocationParams(args=None, plugins=None, dir=Path())
+
@pytest.mark.parametrize(
"plugin",
@@ -1286,7 +1291,7 @@ def test_config_blocked_default_plugins(testdir, plugin):
if plugin != "terminal":
result.stdout.fnmatch_lines(["* 1 failed in *"])
else:
- assert result.stdout.lines == [""]
+ assert result.stdout.lines == []
class TestSetupCfg:
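
Note: the two assertion changes above plus the new TypeError check describe one contract: Config.InvocationParams normalizes args to a tuple at construction time. A sketch consistent with that behaviour, assuming an attrs-based implementation (tuple(None) is what raises the TypeError):

    import attr

    @attr.s(frozen=True)
    class InvocationParams:
        args = attr.ib(converter=tuple)
        plugins = attr.ib()
        dir = attr.ib()

    params = InvocationParams(args=["a", "-v"], plugins=None, dir=".")
    assert params.args == ("a", "-v")
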
diff --git a/testing/test_conftest.py b/testing/test_conftest.py
index 3f08ee381..2918ff04c 100644
--- a/testing/test_conftest.py
+++ b/testing/test_conftest.py
@@ -1,12 +1,12 @@
import os
import textwrap
-from pathlib import Path
import py
import pytest
from _pytest.config import PytestPluginManager
from _pytest.main import ExitCode
+from _pytest.pathlib import Path
def ConftestWithSetinitial(path):
@@ -187,7 +187,7 @@ def test_conftest_confcutdir(testdir):
)
result = testdir.runpytest("-h", "--confcutdir=%s" % x, x)
result.stdout.fnmatch_lines(["*--xyz*"])
- assert "warning: could not load initial" not in result.stdout.str()
+ result.stdout.no_fnmatch_line("*warning: could not load initial*")
@pytest.mark.skipif(
@@ -648,5 +648,5 @@ def test_required_option_help(testdir):
)
)
result = testdir.runpytest("-h", x)
- assert "argument --xyz is required" not in result.stdout.str()
+ result.stdout.no_fnmatch_line("*argument --xyz is required*")
assert "general:" in result.stdout.str()
diff --git a/testing/test_doctest.py b/testing/test_doctest.py
index 4aac5432d..79095e3e7 100644
--- a/testing/test_doctest.py
+++ b/testing/test_doctest.py
@@ -239,8 +239,8 @@ class TestDoctests:
]
)
# lines below should be trimmed out
- assert "text-line-2" not in result.stdout.str()
- assert "text-line-after" not in result.stdout.str()
+ result.stdout.no_fnmatch_line("*text-line-2*")
+ result.stdout.no_fnmatch_line("*text-line-after*")
def test_docstring_full_context_around_error(self, testdir):
"""Test that we show the whole context before the actual line of a failing
@@ -334,7 +334,7 @@ class TestDoctests:
[
"*ERROR collecting hello.py*",
"*{e}: No module named *asdals*".format(e=MODULE_NOT_FOUND_ERROR),
- "*Interrupted: 1 errors during collection*",
+ "*Interrupted: 1 error during collection*",
]
)
@@ -839,7 +839,8 @@ class TestLiterals:
reprec = testdir.inline_run()
reprec.assertoutcome(failed=1)
- def test_number_re(self):
+ def test_number_re(self) -> None:
+ _number_re = _get_checker()._number_re # type: ignore
for s in [
"1.",
"+1.",
@@ -861,12 +862,12 @@ class TestLiterals:
"-1.2e-3",
]:
print(s)
- m = _get_checker()._number_re.match(s)
+ m = _number_re.match(s)
assert m is not None
assert float(m.group()) == pytest.approx(float(s))
for s in ["1", "abc"]:
print(s)
- assert _get_checker()._number_re.match(s) is None
+ assert _number_re.match(s) is None
@pytest.mark.parametrize("config_mode", ["ini", "comment"])
def test_number_precision(self, testdir, config_mode):
@@ -1177,7 +1178,7 @@ class TestDoctestAutoUseFixtures:
"""
)
result = testdir.runpytest("--doctest-modules")
- assert "FAILURES" not in str(result.stdout.str())
+ result.stdout.no_fnmatch_line("*FAILURES*")
result.stdout.fnmatch_lines(["*=== 1 passed in *"])
@pytest.mark.parametrize("scope", SCOPES)
@@ -1209,7 +1210,7 @@ class TestDoctestAutoUseFixtures:
"""
)
result = testdir.runpytest("--doctest-modules")
- assert "FAILURES" not in str(result.stdout.str())
+        result.stdout.no_fnmatch_line("*FAILURES*")
result.stdout.fnmatch_lines(["*=== 1 passed in *"])
diff --git a/testing/test_faulthandler.py b/testing/test_faulthandler.py
index a0cf1d8c1..e99206a4d 100644
--- a/testing/test_faulthandler.py
+++ b/testing/test_faulthandler.py
@@ -58,13 +58,13 @@ def test_timeout(testdir, enabled):
"""
import time
def test_timeout():
- time.sleep(2.0)
+ time.sleep(0.1)
"""
)
testdir.makeini(
"""
[pytest]
- faulthandler_timeout = 1
+ faulthandler_timeout = 0.01
"""
)
args = ["-p", "no:faulthandler"] if not enabled else []
diff --git a/testing/test_junitxml.py b/testing/test_junitxml.py
index d4a1f6cc3..4c2f22a3d 100644
--- a/testing/test_junitxml.py
+++ b/testing/test_junitxml.py
@@ -1,7 +1,6 @@
import os
import platform
from datetime import datetime
-from pathlib import Path
from xml.dom import minidom
import py
@@ -9,6 +8,7 @@ import xmlschema
import pytest
from _pytest.junitxml import LogXML
+from _pytest.pathlib import Path
from _pytest.reports import BaseReport
@@ -477,22 +477,25 @@ class TestPython:
assert "ValueError" in fnode.toxml()
systemout = fnode.next_sibling
assert systemout.tag == "system-out"
- assert "hello-stdout" in systemout.toxml()
- assert "info msg" not in systemout.toxml()
+ systemout_xml = systemout.toxml()
+ assert "hello-stdout" in systemout_xml
+ assert "info msg" not in systemout_xml
systemerr = systemout.next_sibling
assert systemerr.tag == "system-err"
- assert "hello-stderr" in systemerr.toxml()
- assert "info msg" not in systemerr.toxml()
+ systemerr_xml = systemerr.toxml()
+ assert "hello-stderr" in systemerr_xml
+ assert "info msg" not in systemerr_xml
if junit_logging == "system-out":
- assert "warning msg" in systemout.toxml()
- assert "warning msg" not in systemerr.toxml()
+ assert "warning msg" in systemout_xml
+ assert "warning msg" not in systemerr_xml
elif junit_logging == "system-err":
- assert "warning msg" not in systemout.toxml()
- assert "warning msg" in systemerr.toxml()
- elif junit_logging == "no":
- assert "warning msg" not in systemout.toxml()
- assert "warning msg" not in systemerr.toxml()
+ assert "warning msg" not in systemout_xml
+ assert "warning msg" in systemerr_xml
+ else:
+ assert junit_logging == "no"
+ assert "warning msg" not in systemout_xml
+ assert "warning msg" not in systemerr_xml
@parametrize_families
def test_failure_verbose_message(self, testdir, run_and_parse, xunit_family):
@@ -1216,7 +1219,7 @@ def test_runs_twice(testdir, run_and_parse):
)
result, dom = run_and_parse(f, f)
- assert "INTERNALERROR" not in result.stdout.str()
+ result.stdout.no_fnmatch_line("*INTERNALERROR*")
first, second = [x["classname"] for x in dom.find_by_tag("testcase")]
assert first == second
@@ -1231,7 +1234,7 @@ def test_runs_twice_xdist(testdir, run_and_parse):
)
result, dom = run_and_parse(f, "--dist", "each", "--tx", "2*popen")
- assert "INTERNALERROR" not in result.stdout.str()
+ result.stdout.no_fnmatch_line("*INTERNALERROR*")
first, second = [x["classname"] for x in dom.find_by_tag("testcase")]
assert first == second
@@ -1271,7 +1274,7 @@ def test_fancy_items_regression(testdir, run_and_parse):
result, dom = run_and_parse()
- assert "INTERNALERROR" not in result.stdout.str()
+ result.stdout.no_fnmatch_line("*INTERNALERROR*")
items = sorted("%(classname)s %(name)s" % x for x in dom.find_by_tag("testcase"))
import pprint
diff --git a/testing/test_mark.py b/testing/test_mark.py
index 764d523a3..0e4422025 100644
--- a/testing/test_mark.py
+++ b/testing/test_mark.py
@@ -314,6 +314,21 @@ def test_keyword_option_parametrize(spec, testdir):
assert list(passed) == list(passed_result)
+def test_parametrize_with_module(testdir):
+ testdir.makepyfile(
+ """
+ import pytest
+ @pytest.mark.parametrize("arg", [pytest,])
+ def test_func(arg):
+ pass
+ """
+ )
+ rec = testdir.inline_run()
+    passed, skipped, failed = rec.listoutcomes()
+ expected_id = "test_func[" + pytest.__name__ + "]"
+ assert passed[0].nodeid.split("::")[-1] == expected_id
+
+
@pytest.mark.parametrize(
"spec",
[
@@ -831,6 +846,12 @@ class TestMarkDecorator:
def test__eq__(self, lhs, rhs, expected):
assert (lhs == rhs) == expected
+ def test_aliases(self) -> None:
+ md = pytest.mark.foo(1, "2", three=3)
+ assert md.name == "foo"
+ assert md.args == (1, "2")
+ assert md.kwargs == {"three": 3}
+
@pytest.mark.parametrize("mark", [None, "", "skip", "xfail"])
def test_parameterset_for_parametrize_marks(testdir, mark):
@@ -891,7 +912,7 @@ def test_parameterset_for_fail_at_collect(testdir):
result = testdir.runpytest(str(p1))
result.stdout.fnmatch_lines(
[
- "collected 0 items / 1 errors",
+ "collected 0 items / 1 error",
"* ERROR collecting test_parameterset_for_fail_at_collect.py *",
"Empty parameter set in 'test' at line 3",
"*= 1 error in *",
@@ -990,7 +1011,7 @@ def test_markers_from_parametrize(testdir):
def test_pytest_param_id_requires_string():
with pytest.raises(TypeError) as excinfo:
pytest.param(id=True)
- msg, = excinfo.value.args
+ (msg,) = excinfo.value.args
assert msg == "Expected id to be a string, got <class 'bool'>: True"
diff --git a/testing/test_meta.py b/testing/test_meta.py
index 7aa100e6e..296aa42aa 100644
--- a/testing/test_meta.py
+++ b/testing/test_meta.py
@@ -15,6 +15,7 @@ def _modules():
)
+@pytest.mark.slow
@pytest.mark.parametrize("module", _modules())
def test_no_warnings(module):
# fmt: off
diff --git a/testing/test_pdb.py b/testing/test_pdb.py
index 55a7230f4..25d2292e9 100644
--- a/testing/test_pdb.py
+++ b/testing/test_pdb.py
@@ -193,7 +193,7 @@ class TestPDB:
)
child = testdir.spawn_pytest("-rs --pdb %s" % p1)
child.expect("Skipping also with pdb active")
- child.expect("1 skipped in")
+ child.expect_exact("= \x1b[33m\x1b[1m1 skipped\x1b[0m\x1b[33m in")
child.sendeof()
self.flush(child)
@@ -221,7 +221,7 @@ class TestPDB:
child.sendeof()
rest = child.read().decode("utf8")
assert "Exit: Quitting debugger" in rest
- assert "= 1 failed in" in rest
+ assert "= \x1b[31m\x1b[1m1 failed\x1b[0m\x1b[31m in" in rest
assert "def test_1" not in rest
assert "get rekt" not in rest
self.flush(child)
@@ -466,7 +466,6 @@ class TestPDB:
def test_pdb_interaction_doctest(self, testdir, monkeypatch):
p1 = testdir.makepyfile(
"""
- import pytest
def function_1():
'''
>>> i = 0
@@ -485,9 +484,32 @@ class TestPDB:
child.sendeof()
rest = child.read().decode("utf8")
+ assert "! _pytest.outcomes.Exit: Quitting debugger !" in rest
+ assert "BdbQuit" not in rest
assert "1 failed" in rest
self.flush(child)
+ def test_doctest_set_trace_quit(self, testdir, monkeypatch):
+ p1 = testdir.makepyfile(
+ """
+ def function_1():
+ '''
+ >>> __import__('pdb').set_trace()
+ '''
+ """
+ )
+ # NOTE: does not use pytest.set_trace, but Python's patched pdb,
+ # therefore "-s" is required.
+ child = testdir.spawn_pytest("--doctest-modules --pdb -s %s" % p1)
+ child.expect("Pdb")
+ child.sendline("q")
+ rest = child.read().decode("utf8")
+
+ assert "! _pytest.outcomes.Exit: Quitting debugger !" in rest
+ assert "= \x1b[33mno tests ran\x1b[0m\x1b[33m in" in rest
+ assert "BdbQuit" not in rest
+ assert "UNEXPECTED EXCEPTION" not in rest
+
def test_pdb_interaction_capturing_twice(self, testdir):
p1 = testdir.makepyfile(
"""
@@ -703,7 +725,7 @@ class TestPDB:
assert "> PDB continue (IO-capturing resumed) >" in rest
else:
assert "> PDB continue >" in rest
- assert "1 passed in" in rest
+ assert "= \x1b[32m\x1b[1m1 passed\x1b[0m\x1b[32m in" in rest
def test_pdb_used_outside_test(self, testdir):
p1 = testdir.makepyfile(
@@ -1019,7 +1041,7 @@ class TestTraceOption:
child.sendline("q")
child.expect_exact("Exit: Quitting debugger")
rest = child.read().decode("utf8")
- assert "2 passed in" in rest
+ assert "= \x1b[32m\x1b[1m2 passed\x1b[0m\x1b[32m in" in rest
assert "reading from stdin while output" not in rest
# Only printed once - not on stderr.
assert "Exit: Quitting debugger" not in child.before.decode("utf8")
@@ -1064,7 +1086,7 @@ class TestTraceOption:
child.sendline("c")
child.expect_exact("> PDB continue (IO-capturing resumed) >")
rest = child.read().decode("utf8")
- assert "6 passed in" in rest
+ assert "= \x1b[32m\x1b[1m6 passed\x1b[0m\x1b[32m in" in rest
assert "reading from stdin while output" not in rest
# Only printed once - not on stderr.
assert "Exit: Quitting debugger" not in child.before.decode("utf8")
@@ -1175,7 +1197,7 @@ def test_pdb_suspends_fixture_capturing(testdir, fixture):
TestPDB.flush(child)
assert child.exitstatus == 0
- assert "= 1 passed in " in rest
+ assert "= \x1b[32m\x1b[1m1 passed\x1b[0m\x1b[32m in" in rest
assert "> PDB continue (IO-capturing resumed for fixture %s) >" % (fixture) in rest
diff --git a/testing/test_pluginmanager.py b/testing/test_pluginmanager.py
index 97f220ca5..836b458c6 100644
--- a/testing/test_pluginmanager.py
+++ b/testing/test_pluginmanager.py
@@ -135,6 +135,36 @@ class TestPytestPluginInteractions:
ihook_b = session.gethookproxy(testdir.tmpdir.join("tests"))
assert ihook_a is not ihook_b
+ def test_hook_with_addoption(self, testdir):
+ """Test that hooks can be used in a call to pytest_addoption"""
+ testdir.makepyfile(
+ newhooks="""
+ import pytest
+ @pytest.hookspec(firstresult=True)
+ def pytest_default_value():
+ pass
+ """
+ )
+ testdir.makepyfile(
+ myplugin="""
+ import newhooks
+ def pytest_addhooks(pluginmanager):
+ pluginmanager.add_hookspecs(newhooks)
+ def pytest_addoption(parser, pluginmanager):
+ default_value = pluginmanager.hook.pytest_default_value()
+ parser.addoption("--config", help="Config, defaults to %(default)s", default=default_value)
+ """
+ )
+ testdir.makeconftest(
+ """
+ pytest_plugins=("myplugin",)
+ def pytest_default_value():
+ return "default_value"
+ """
+ )
+ res = testdir.runpytest("--help")
+ res.stdout.fnmatch_lines(["*--config=CONFIG*default_value*"])
+
def test_default_markers(testdir):
result = testdir.runpytest("--markers")
diff --git a/testing/test_pytester.py b/testing/test_pytester.py
index d330ff253..758e999dc 100644
--- a/testing/test_pytester.py
+++ b/testing/test_pytester.py
@@ -121,17 +121,6 @@ def test_runresult_assertion_on_xpassed(testdir):
assert result.ret == 0
-def test_runresult_repr():
- from _pytest.pytester import RunResult
-
- assert (
- repr(
- RunResult(ret="ret", outlines=[""], errlines=["some", "errors"], duration=1)
- )
- == "<RunResult ret='ret' len(stdout.lines)=1 len(stderr.lines)=2 duration=1.00s>"
- )
-
-
def test_xpassed_with_strict_is_considered_a_failure(testdir):
testdir.makepyfile(
"""
@@ -406,6 +395,27 @@ def test_testdir_subprocess(testdir):
assert testdir.runpytest_subprocess(testfile).ret == 0
+def test_testdir_subprocess_via_runpytest_arg(testdir) -> None:
+ testfile = testdir.makepyfile(
+ """
+ def test_testdir_subprocess(testdir):
+ import os
+ testfile = testdir.makepyfile(
+ \"""
+ import os
+ def test_one():
+ assert {} != os.getpid()
+ \""".format(os.getpid())
+ )
+ assert testdir.runpytest(testfile).ret == 0
+ """
+ )
+ result = testdir.runpytest_subprocess(
+ "-p", "pytester", "--runpytest", "subprocess", testfile
+ )
+ assert result.ret == 0
+
+
def test_unicode_args(testdir):
result = testdir.runpytest("-k", "💩")
assert result.ret == ExitCode.NO_TESTS_COLLECTED
@@ -457,6 +467,81 @@ def test_linematcher_with_nonlist():
assert lm._getlines(set()) == set()
+def test_linematcher_match_failure():
+ lm = LineMatcher(["foo", "foo", "bar"])
+ with pytest.raises(pytest.fail.Exception) as e:
+ lm.fnmatch_lines(["foo", "f*", "baz"])
+ assert e.value.msg.splitlines() == [
+ "exact match: 'foo'",
+ "fnmatch: 'f*'",
+ " with: 'foo'",
+ "nomatch: 'baz'",
+ " and: 'bar'",
+ "remains unmatched: 'baz'",
+ ]
+
+ lm = LineMatcher(["foo", "foo", "bar"])
+ with pytest.raises(pytest.fail.Exception) as e:
+ lm.re_match_lines(["foo", "^f.*", "baz"])
+ assert e.value.msg.splitlines() == [
+ "exact match: 'foo'",
+ "re.match: '^f.*'",
+ " with: 'foo'",
+ " nomatch: 'baz'",
+ " and: 'bar'",
+ "remains unmatched: 'baz'",
+ ]
+
+
+@pytest.mark.parametrize("function", ["no_fnmatch_line", "no_re_match_line"])
+def test_no_matching(function):
+ if function == "no_fnmatch_line":
+ good_pattern = "*.py OK*"
+ bad_pattern = "*X.py OK*"
+ else:
+ assert function == "no_re_match_line"
+ good_pattern = r".*py OK"
+ bad_pattern = r".*Xpy OK"
+
+ lm = LineMatcher(
+ [
+ "cachedir: .pytest_cache",
+ "collecting ... collected 1 item",
+ "",
+ "show_fixtures_per_test.py OK",
+ "=== elapsed 1s ===",
+ ]
+ )
+
+ # check the function twice to ensure we don't accumulate the internal buffer
+ for i in range(2):
+ with pytest.raises(pytest.fail.Exception) as e:
+ func = getattr(lm, function)
+ func(good_pattern)
+ obtained = str(e.value).splitlines()
+ if function == "no_fnmatch_line":
+ assert obtained == [
+ "nomatch: '{}'".format(good_pattern),
+ " and: 'cachedir: .pytest_cache'",
+ " and: 'collecting ... collected 1 item'",
+ " and: ''",
+ "fnmatch: '{}'".format(good_pattern),
+ " with: 'show_fixtures_per_test.py OK'",
+ ]
+ else:
+ assert obtained == [
+ "nomatch: '{}'".format(good_pattern),
+ " and: 'cachedir: .pytest_cache'",
+ " and: 'collecting ... collected 1 item'",
+ " and: ''",
+ "re.match: '{}'".format(good_pattern),
+ " with: 'show_fixtures_per_test.py OK'",
+ ]
+
+ func = getattr(lm, function)
+ func(bad_pattern) # bad pattern does not match any line: passes
+
+
def test_pytester_addopts(request, monkeypatch):
monkeypatch.setenv("PYTEST_ADDOPTS", "--orig-unused")
@@ -570,3 +655,22 @@ def test_spawn_uses_tmphome(testdir):
child = testdir.spawn_pytest(str(p1))
out = child.read()
assert child.wait() == 0, out.decode("utf8")
+
+
+def test_run_result_repr():
+ outlines = ["some", "normal", "output"]
+ errlines = ["some", "nasty", "errors", "happened"]
+
+ # known exit code
+ r = pytester.RunResult(1, outlines, errlines, duration=0.5)
+ assert (
+ repr(r) == "<RunResult ret=ExitCode.TESTS_FAILED len(stdout.lines)=3"
+ " len(stderr.lines)=4 duration=0.50s>"
+ )
+
+ # unknown exit code: just the number
+ r = pytester.RunResult(99, outlines, errlines, duration=0.5)
+ assert (
+ repr(r) == "<RunResult ret=99 len(stdout.lines)=3"
+ " len(stderr.lines)=4 duration=0.50s>"
+ )
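
Note: test_run_result_repr fixes the repr contract for the return code: known codes render as the ExitCode enum member, unknown ones fall back to the bare int. The dispatch, as a sketch:

    from _pytest.main import ExitCode

    def format_ret(ret):
        try:
            return str(ExitCode(ret))  # "ExitCode.TESTS_FAILED"
        except ValueError:
            return str(ret)            # unknown code, e.g. "99"

    assert format_ret(1) == "ExitCode.TESTS_FAILED"
    assert format_ret(99) == "99"
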
diff --git a/testing/test_reports.py b/testing/test_reports.py
index 9f6c56186..ff813543c 100644
--- a/testing/test_reports.py
+++ b/testing/test_reports.py
@@ -330,7 +330,7 @@ class TestHooks:
data = pytestconfig.hook.pytest_report_to_serializable(
config=pytestconfig, report=rep
)
- assert data["_report_type"] == "TestReport"
+ assert data["$report_type"] == "TestReport"
new_rep = pytestconfig.hook.pytest_report_from_serializable(
config=pytestconfig, data=data
)
@@ -352,7 +352,7 @@ class TestHooks:
data = pytestconfig.hook.pytest_report_to_serializable(
config=pytestconfig, report=rep
)
- assert data["_report_type"] == "CollectReport"
+ assert data["$report_type"] == "CollectReport"
new_rep = pytestconfig.hook.pytest_report_from_serializable(
config=pytestconfig, data=data
)
@@ -376,7 +376,7 @@ class TestHooks:
data = pytestconfig.hook.pytest_report_to_serializable(
config=pytestconfig, report=rep
)
- data["_report_type"] = "Unknown"
+ data["$report_type"] = "Unknown"
with pytest.raises(AssertionError):
_ = pytestconfig.hook.pytest_report_from_serializable(
config=pytestconfig, data=data
diff --git a/testing/test_runner.py b/testing/test_runner.py
index 63a94d258..86e9bddff 100644
--- a/testing/test_runner.py
+++ b/testing/test_runner.py
@@ -483,13 +483,22 @@ def test_callinfo():
assert ci.result == 0
assert "result" in repr(ci)
assert repr(ci) == "<CallInfo when='123' result: 0>"
+ assert str(ci) == "<CallInfo when='123' result: 0>"
ci = runner.CallInfo.from_call(lambda: 0 / 0, "123")
assert ci.when == "123"
assert not hasattr(ci, "result")
- assert repr(ci) == "<CallInfo when='123' exception: division by zero>"
+ assert repr(ci) == "<CallInfo when='123' excinfo={!r}>".format(ci.excinfo)
+ assert str(ci) == repr(ci)
assert ci.excinfo
- assert "exc" in repr(ci)
+
+ # Newlines are escaped.
+ def raise_assertion():
+ assert 0, "assert_msg"
+
+ ci = runner.CallInfo.from_call(raise_assertion, "call")
+ assert repr(ci) == "<CallInfo when='call' excinfo={!r}>".format(ci.excinfo)
+ assert "\n" not in repr(ci)
# design question: do we want general hooks in python files?
@@ -588,7 +597,7 @@ def test_pytest_exit_returncode(testdir):
result = testdir.runpytest()
result.stdout.fnmatch_lines(["*! *Exit: some exit msg !*"])
- assert _strip_resource_warnings(result.stderr.lines) == [""]
+ assert _strip_resource_warnings(result.stderr.lines) == []
assert result.ret == 99
# It prints to stderr also in case of exit during pytest_sessionstart.
@@ -603,8 +612,7 @@ def test_pytest_exit_returncode(testdir):
result = testdir.runpytest()
result.stdout.fnmatch_lines(["*! *Exit: during_sessionstart !*"])
assert _strip_resource_warnings(result.stderr.lines) == [
- "Exit: during_sessionstart",
- "",
+ "Exit: during_sessionstart"
]
assert result.ret == 98
@@ -622,7 +630,7 @@ def test_pytest_fail_notrace_runtest(testdir):
)
result = testdir.runpytest()
result.stdout.fnmatch_lines(["world", "hello"])
- assert "def teardown_function" not in result.stdout.str()
+ result.stdout.no_fnmatch_line("*def teardown_function*")
def test_pytest_fail_notrace_collection(testdir):
@@ -637,7 +645,7 @@ def test_pytest_fail_notrace_collection(testdir):
)
result = testdir.runpytest()
result.stdout.fnmatch_lines(["hello"])
- assert "def some_internal_function()" not in result.stdout.str()
+ result.stdout.no_fnmatch_line("*def some_internal_function()*")
def test_pytest_fail_notrace_non_ascii(testdir):
@@ -655,7 +663,7 @@ def test_pytest_fail_notrace_non_ascii(testdir):
)
result = testdir.runpytest()
result.stdout.fnmatch_lines(["*test_hello*", "oh oh: ☺"])
- assert "def test_hello" not in result.stdout.str()
+ result.stdout.no_fnmatch_line("*def test_hello*")
def test_pytest_no_tests_collected_exit_status(testdir):
@@ -820,7 +828,7 @@ def test_failure_in_setup(testdir):
"""
)
result = testdir.runpytest("--tb=line")
- assert "def setup_module" not in result.stdout.str()
+ result.stdout.no_fnmatch_line("*def setup_module*")
def test_makereport_getsource(testdir):
@@ -832,7 +840,7 @@ def test_makereport_getsource(testdir):
"""
)
result = testdir.runpytest()
- assert "INTERNALERROR" not in result.stdout.str()
+ result.stdout.no_fnmatch_line("*INTERNALERROR*")
result.stdout.fnmatch_lines(["*else: assert False*"])
@@ -863,7 +871,7 @@ def test_makereport_getsource_dynamic_code(testdir, monkeypatch):
"""
)
result = testdir.runpytest("-vv")
- assert "INTERNALERROR" not in result.stdout.str()
+ result.stdout.no_fnmatch_line("*INTERNALERROR*")
result.stdout.fnmatch_lines(["*test_fix*", "*fixture*'missing'*not found*"])
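
Most hunks in this file follow one mechanical pattern: `assert "..." not in result.stdout.str()` becomes `result.stdout.no_fnmatch_line("*...*")`, which on failure reports the pattern together with the full captured lines instead of a bare boolean. A minimal usage sketch, assuming the `testdir` fixture:

    def test_no_internal_error(testdir):
        testdir.makepyfile("def test_ok(): assert True")
        result = testdir.runpytest()
        # Raises with the whole stdout attached if the pattern does match.
        result.stdout.no_fnmatch_line("*INTERNALERROR*")
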
diff --git a/testing/test_runner_xunit.py b/testing/test_runner_xunit.py
index 34a086551..0ff508d2c 100644
--- a/testing/test_runner_xunit.py
+++ b/testing/test_runner_xunit.py
@@ -234,10 +234,10 @@ def test_setup_funcarg_setup_when_outer_scope_fails(testdir):
"*ValueError*42*",
"*function2*",
"*ValueError*42*",
- "*2 error*",
+ "*2 errors*",
]
)
- assert "xyz43" not in result.stdout.str()
+ result.stdout.no_fnmatch_line("*xyz43*")
@pytest.mark.parametrize("arg", ["", "arg"])
diff --git a/testing/test_session.py b/testing/test_session.py
index dbe057376..7b4eb817a 100644
--- a/testing/test_session.py
+++ b/testing/test_session.py
@@ -102,15 +102,20 @@ class SessionTests:
p = testdir.makepyfile(
"""
import pytest
+
+ class reprexc(BaseException):
+ def __str__(self):
+ return "Ha Ha fooled you, I'm a broken repr()."
+
class BrokenRepr1(object):
foo=0
def __repr__(self):
- raise Exception("Ha Ha fooled you, I'm a broken repr().")
+ raise reprexc
class TestBrokenClass(object):
def test_explicit_bad_repr(self):
t = BrokenRepr1()
- with pytest.raises(Exception, match="I'm a broken repr"):
+ with pytest.raises(BaseException, match="broken repr"):
repr(t)
def test_implicit_bad_repr1(self):
@@ -123,12 +128,7 @@ class SessionTests:
passed, skipped, failed = reprec.listoutcomes()
assert (len(passed), len(skipped), len(failed)) == (1, 0, 1)
out = failed[0].longrepr.reprcrash.message
- assert (
- out.find(
- """[Exception("Ha Ha fooled you, I'm a broken repr().") raised in repr()]"""
- )
- != -1
- )
+ assert out.find("<[reprexc() raised in repr()] BrokenRepr1") != -1
def test_broken_repr_with_showlocals_verbose(self, testdir):
p = testdir.makepyfile(
@@ -151,7 +151,7 @@ class SessionTests:
assert repr_locals.lines
assert len(repr_locals.lines) == 1
assert repr_locals.lines[0].startswith(
- 'x = <[NotImplementedError("") raised in repr()] ObjWithErrorInRepr'
+ "x = <[NotImplementedError() raised in repr()] ObjWithErrorInRepr"
)
def test_skip_file_by_conftest(self, testdir):
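
The updated expectations reflect how pytest renders objects whose __repr__ raises: the placeholder embeds repr() of the exception instance, hence NotImplementedError() rather than NotImplementedError(""). A rough illustration, assuming the internal _pytest._io.saferepr helper:

    from _pytest._io.saferepr import saferepr

    class ObjWithErrorInRepr:
        def __repr__(self):
            raise NotImplementedError()

    # Yields e.g. "<[NotImplementedError() raised in repr()] ObjWithErrorInRepr object at 0x...>"
    print(saferepr(ObjWithErrorInRepr()))
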
diff --git a/testing/python/setup_only.py b/testing/test_setuponly.py
index 02bc65fac..7549874db 100644
--- a/testing/python/setup_only.py
+++ b/testing/test_setuponly.py
@@ -28,7 +28,7 @@ def test_show_only_active_fixtures(testdir, mode, dummy_yaml_custom_test):
result.stdout.fnmatch_lines(
["*SETUP F arg1*", "*test_arg1 (fixtures used: arg1)*", "*TEARDOWN F arg1*"]
)
- assert "_arg0" not in result.stdout.str()
+ result.stdout.no_fnmatch_line("*_arg0*")
def test_show_different_scopes(testdir, mode):
diff --git a/testing/python/setup_plan.py b/testing/test_setupplan.py
index a44474dd1..a44474dd1 100644
--- a/testing/python/setup_plan.py
+++ b/testing/test_setupplan.py
diff --git a/testing/test_skipping.py b/testing/test_skipping.py
index 8bba479f1..67714d030 100644
--- a/testing/test_skipping.py
+++ b/testing/test_skipping.py
@@ -115,7 +115,7 @@ class TestEvaluator:
)
def test_skipif_class(self, testdir):
- item, = testdir.getitems(
+ (item,) = testdir.getitems(
"""
import pytest
class TestClass(object):
@@ -731,23 +731,37 @@ def test_skipif_class(testdir):
def test_skipped_reasons_functional(testdir):
testdir.makepyfile(
test_one="""
+ import pytest
from conftest import doskip
+
def setup_function(func):
doskip()
+
def test_func():
pass
+
class TestClass(object):
def test_method(self):
doskip()
- """,
+
+ @pytest.mark.skip("via_decorator")
+ def test_deco(self):
+ assert 0
+ """,
conftest="""
- import pytest
+ import pytest, sys
def doskip():
+ assert sys._getframe().f_lineno == 3
pytest.skip('test')
""",
)
result = testdir.runpytest("-rs")
- result.stdout.fnmatch_lines(["*SKIP*2*conftest.py:4: test"])
+ result.stdout.fnmatch_lines_random(
+ [
+ "SKIPPED [[]2[]] */conftest.py:4: test",
+ "SKIPPED [[]1[]] test_one.py:14: via_decorator",
+ ]
+ )
assert result.ret == 0
@@ -886,7 +900,7 @@ def test_errors_in_xfail_skip_expressions(testdir):
" syntax error",
markline,
"SyntaxError: invalid syntax",
- "*1 pass*2 error*",
+ "*1 pass*2 errors*",
]
)
@@ -949,7 +963,7 @@ def test_xfail_test_setup_exception(testdir):
result = testdir.runpytest(p)
assert result.ret == 0
assert "xfailed" in result.stdout.str()
- assert "xpassed" not in result.stdout.str()
+ result.stdout.no_fnmatch_line("*xpassed*")
def test_imperativeskip_on_xfail_test(testdir):
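
The reworked expectations pin down how -rs groups skips: skips routed through a shared helper are counted together under the location of the pytest.skip() call, while a @pytest.mark.skip reason is reported at the test's own line. A hedged sketch of the decorator side:

    import pytest

    @pytest.mark.skip(reason="via_decorator")
    def test_deco():
        # Never reached; -rs shows "SKIPPED [1] <file>:<line>: via_decorator".
        assert 0
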
diff --git a/testing/test_stepwise.py b/testing/test_stepwise.py
index 032351e7f..8d1b82f70 100644
--- a/testing/test_stepwise.py
+++ b/testing/test_stepwise.py
@@ -164,7 +164,7 @@ def test_stop_on_collection_errors(broken_testdir, broken_first):
if broken_first:
files.reverse()
result = broken_testdir.runpytest("-v", "--strict-markers", "--stepwise", *files)
- result.stdout.fnmatch_lines("*errors during collection*")
+ result.stdout.fnmatch_lines("*error during collection*")
def test_xfail_handling(testdir):
diff --git a/testing/test_terminal.py b/testing/test_terminal.py
index 2cdfc6ca8..d31033197 100644
--- a/testing/test_terminal.py
+++ b/testing/test_terminal.py
@@ -21,30 +21,26 @@ from _pytest.terminal import getreportopt
from _pytest.terminal import TerminalReporter
DistInfo = collections.namedtuple("DistInfo", ["project_name", "version"])
+RED = r"\x1b\[31m"
+GREEN = r"\x1b\[32m"
+YELLOW = r"\x1b\[33m"
+RESET = r"\x1b\[0m"
class Option:
- def __init__(self, verbosity=0, fulltrace=False):
+ def __init__(self, verbosity=0):
self.verbosity = verbosity
- self.fulltrace = fulltrace
@property
def args(self):
values = []
values.append("--verbosity=%d" % self.verbosity)
- if self.fulltrace:
- values.append("--fulltrace")
return values
@pytest.fixture(
- params=[
- Option(verbosity=0),
- Option(verbosity=1),
- Option(verbosity=-1),
- Option(fulltrace=True),
- ],
- ids=["default", "verbose", "quiet", "fulltrace"],
+ params=[Option(verbosity=0), Option(verbosity=1), Option(verbosity=-1)],
+ ids=["default", "verbose", "quiet"],
)
def option(request):
return request.param
@@ -165,7 +161,7 @@ class TestTerminal:
child.expect(r"collecting 2 items")
child.expect(r"collected 2 items")
rest = child.read().decode("utf8")
- assert "2 passed in" in rest
+ assert "= \x1b[32m\x1b[1m2 passed\x1b[0m\x1b[32m in" in rest
def test_itemreport_subclasses_show_subclassed_file(self, testdir):
testdir.makepyfile(
@@ -205,9 +201,10 @@ class TestTerminal:
result = testdir.runpytest("-vv")
assert result.ret == 0
result.stdout.fnmatch_lines(["*a123/test_hello123.py*PASS*"])
- assert " <- " not in result.stdout.str()
+ result.stdout.no_fnmatch_line("* <- *")
- def test_keyboard_interrupt(self, testdir, option):
+ @pytest.mark.parametrize("fulltrace", ("", "--fulltrace"))
+ def test_keyboard_interrupt(self, testdir, fulltrace):
testdir.makepyfile(
"""
def test_foobar():
@@ -219,7 +216,7 @@ class TestTerminal:
"""
)
- result = testdir.runpytest(*option.args, no_reraise_ctrlc=True)
+ result = testdir.runpytest(fulltrace, no_reraise_ctrlc=True)
result.stdout.fnmatch_lines(
[
" def test_foobar():",
@@ -228,7 +225,7 @@ class TestTerminal:
"*_keyboard_interrupt.py:6: KeyboardInterrupt*",
]
)
- if option.fulltrace:
+ if fulltrace:
result.stdout.fnmatch_lines(
["*raise KeyboardInterrupt # simulating the user*"]
)
@@ -560,7 +557,7 @@ class TestTerminalFunctional:
"*= 2 passed, 1 deselected in * =*",
]
)
- assert "= 1 deselected =" not in result.stdout.str()
+ result.stdout.no_fnmatch_line("*= 1 deselected =*")
assert result.ret == 0
def test_no_skip_summary_if_failure(self, testdir):
@@ -760,7 +757,7 @@ def test_fail_extra_reporting(testdir, monkeypatch):
monkeypatch.setenv("COLUMNS", "80")
testdir.makepyfile("def test_this(): assert 0, 'this_failed' * 100")
result = testdir.runpytest()
- assert "short test summary" not in result.stdout.str()
+ result.stdout.no_fnmatch_line("*short test summary*")
result = testdir.runpytest("-rf")
result.stdout.fnmatch_lines(
[
@@ -773,13 +770,13 @@ def test_fail_extra_reporting(testdir, monkeypatch):
def test_fail_reporting_on_pass(testdir):
testdir.makepyfile("def test_this(): assert 1")
result = testdir.runpytest("-rf")
- assert "short test summary" not in result.stdout.str()
+ result.stdout.no_fnmatch_line("*short test summary*")
def test_pass_extra_reporting(testdir):
testdir.makepyfile("def test_this(): assert 1")
result = testdir.runpytest()
- assert "short test summary" not in result.stdout.str()
+ result.stdout.no_fnmatch_line("*short test summary*")
result = testdir.runpytest("-rp")
result.stdout.fnmatch_lines(["*test summary*", "PASS*test_pass_extra_reporting*"])
@@ -787,7 +784,7 @@ def test_pass_extra_reporting(testdir):
def test_pass_reporting_on_fail(testdir):
testdir.makepyfile("def test_this(): assert 0")
result = testdir.runpytest("-rp")
- assert "short test summary" not in result.stdout.str()
+ result.stdout.no_fnmatch_line("*short test summary*")
def test_pass_output_reporting(testdir):
@@ -830,7 +827,7 @@ def test_color_no(testdir):
testdir.makepyfile("def test_this(): assert 1")
result = testdir.runpytest("--color=no")
assert "test session starts" in result.stdout.str()
- assert "\x1b[1m" not in result.stdout.str()
+ result.stdout.no_fnmatch_line("*\x1b[1m*")
@pytest.mark.parametrize("verbose", [True, False])
@@ -852,7 +849,7 @@ def test_color_yes_collection_on_non_atty(testdir, verbose):
result = testdir.runpytest(*args)
assert "test session starts" in result.stdout.str()
assert "\x1b[1m" in result.stdout.str()
- assert "collecting 10 items" not in result.stdout.str()
+ result.stdout.no_fnmatch_line("*collecting 10 items*")
if verbose:
assert "collecting ..." in result.stdout.str()
assert "collected 10 items" in result.stdout.str()
@@ -966,7 +963,31 @@ class TestGenericReporting:
)
result = testdir.runpytest("--maxfail=2", *option.args)
result.stdout.fnmatch_lines(
- ["*def test_1():*", "*def test_2():*", "*2 failed*"]
+ [
+ "*def test_1():*",
+ "*def test_2():*",
+ "*! stopping after 2 failures !*",
+ "*2 failed*",
+ ]
+ )
+
+ def test_maxfailures_with_interrupted(self, testdir):
+ testdir.makepyfile(
+ """
+ def test(request):
+ request.session.shouldstop = "session_interrupted"
+ assert 0
+ """
+ )
+ result = testdir.runpytest("--maxfail=1", "-ra")
+ result.stdout.fnmatch_lines(
+ [
+ "*= short test summary info =*",
+ "FAILED *",
+ "*! stopping after 1 failures !*",
+ "*! session_interrupted !*",
+ "*= 1 failed in*",
+ ]
)
def test_tb_option(self, testdir, option):
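
test_maxfailures_with_interrupted above leans on the same session flag that --maxfail uses internally: assigning a string to session.shouldstop aborts the run after the current test, and the string is echoed in the "!! ... !!" summary line. A sketch, assuming the `request` fixture:

    def test_stop_here(request):
        # Any truthy string interrupts the session once this test finishes.
        request.session.shouldstop = "stopping for demo"
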
@@ -1215,7 +1236,7 @@ def test_terminal_summary_warnings_are_displayed(testdir):
"*== 1 failed, 2 warnings in *",
]
)
- assert "None" not in result.stdout.str()
+ result.stdout.no_fnmatch_line("*None*")
stdout = result.stdout.str()
assert stdout.count("warning_from_test") == 1
assert stdout.count("=== warnings summary ") == 2
@@ -1237,10 +1258,10 @@ def test_terminal_summary_warnings_header_once(testdir):
"*= warnings summary =*",
"*warning_from_test*",
"*= short test summary info =*",
- "*== 1 failed, 1 warnings in *",
+ "*== 1 failed, 1 warning in *",
]
)
- assert "None" not in result.stdout.str()
+ result.stdout.no_fnmatch_line("*None*")
stdout = result.stdout.str()
assert stdout.count("warning_from_test") == 1
assert stdout.count("=== warnings summary ") == 1
@@ -1253,42 +1274,120 @@ def test_terminal_summary_warnings_header_once(testdir):
# dict value, not the actual contents, so tuples of anything
# suffice
# Important statuses -- the highest priority of these always wins
- ("red", "1 failed", {"failed": (1,)}),
- ("red", "1 failed, 1 passed", {"failed": (1,), "passed": (1,)}),
- ("red", "1 error", {"error": (1,)}),
- ("red", "1 passed, 1 error", {"error": (1,), "passed": (1,)}),
+ ("red", [("1 failed", {"bold": True, "red": True})], {"failed": (1,)}),
+ (
+ "red",
+ [
+ ("1 failed", {"bold": True, "red": True}),
+ ("1 passed", {"bold": False, "green": True}),
+ ],
+ {"failed": (1,), "passed": (1,)},
+ ),
+ ("red", [("1 error", {"bold": True, "red": True})], {"error": (1,)}),
+ ("red", [("2 errors", {"bold": True, "red": True})], {"error": (1, 2)}),
+ (
+ "red",
+ [
+ ("1 passed", {"bold": False, "green": True}),
+ ("1 error", {"bold": True, "red": True}),
+ ],
+ {"error": (1,), "passed": (1,)},
+ ),
# (a status that's not known to the code)
- ("yellow", "1 weird", {"weird": (1,)}),
- ("yellow", "1 passed, 1 weird", {"weird": (1,), "passed": (1,)}),
- ("yellow", "1 warnings", {"warnings": (1,)}),
- ("yellow", "1 passed, 1 warnings", {"warnings": (1,), "passed": (1,)}),
- ("green", "5 passed", {"passed": (1, 2, 3, 4, 5)}),
+ ("yellow", [("1 weird", {"bold": True, "yellow": True})], {"weird": (1,)}),
+ (
+ "yellow",
+ [
+ ("1 passed", {"bold": False, "green": True}),
+ ("1 weird", {"bold": True, "yellow": True}),
+ ],
+ {"weird": (1,), "passed": (1,)},
+ ),
+ ("yellow", [("1 warning", {"bold": True, "yellow": True})], {"warnings": (1,)}),
+ (
+ "yellow",
+ [
+ ("1 passed", {"bold": False, "green": True}),
+ ("1 warning", {"bold": True, "yellow": True}),
+ ],
+ {"warnings": (1,), "passed": (1,)},
+ ),
+ (
+ "green",
+ [("5 passed", {"bold": True, "green": True})],
+ {"passed": (1, 2, 3, 4, 5)},
+ ),
# "Boring" statuses. These have no effect on the color of the summary
# line. Thus, if *every* test has a boring status, the summary line stays
# at its default color, i.e. yellow, to warn the user that the test run
# produced no useful information
- ("yellow", "1 skipped", {"skipped": (1,)}),
- ("green", "1 passed, 1 skipped", {"skipped": (1,), "passed": (1,)}),
- ("yellow", "1 deselected", {"deselected": (1,)}),
- ("green", "1 passed, 1 deselected", {"deselected": (1,), "passed": (1,)}),
- ("yellow", "1 xfailed", {"xfailed": (1,)}),
- ("green", "1 passed, 1 xfailed", {"xfailed": (1,), "passed": (1,)}),
- ("yellow", "1 xpassed", {"xpassed": (1,)}),
- ("green", "1 passed, 1 xpassed", {"xpassed": (1,), "passed": (1,)}),
+ ("yellow", [("1 skipped", {"bold": True, "yellow": True})], {"skipped": (1,)}),
+ (
+ "green",
+ [
+ ("1 passed", {"bold": True, "green": True}),
+ ("1 skipped", {"bold": False, "yellow": True}),
+ ],
+ {"skipped": (1,), "passed": (1,)},
+ ),
+ (
+ "yellow",
+ [("1 deselected", {"bold": True, "yellow": True})],
+ {"deselected": (1,)},
+ ),
+ (
+ "green",
+ [
+ ("1 passed", {"bold": True, "green": True}),
+ ("1 deselected", {"bold": False, "yellow": True}),
+ ],
+ {"deselected": (1,), "passed": (1,)},
+ ),
+ ("yellow", [("1 xfailed", {"bold": True, "yellow": True})], {"xfailed": (1,)}),
+ (
+ "green",
+ [
+ ("1 passed", {"bold": True, "green": True}),
+ ("1 xfailed", {"bold": False, "yellow": True}),
+ ],
+ {"xfailed": (1,), "passed": (1,)},
+ ),
+ ("yellow", [("1 xpassed", {"bold": True, "yellow": True})], {"xpassed": (1,)}),
+ (
+ "green",
+ [
+ ("1 passed", {"bold": True, "green": True}),
+ ("1 xpassed", {"bold": False, "yellow": True}),
+ ],
+ {"xpassed": (1,), "passed": (1,)},
+ ),
# Likewise if no tests were found at all
- ("yellow", "no tests ran", {}),
+ ("yellow", [("no tests ran", {"yellow": True})], {}),
# Test the empty-key special case
- ("yellow", "no tests ran", {"": (1,)}),
- ("green", "1 passed", {"": (1,), "passed": (1,)}),
+ ("yellow", [("no tests ran", {"yellow": True})], {"": (1,)}),
+ (
+ "green",
+ [("1 passed", {"bold": True, "green": True})],
+ {"": (1,), "passed": (1,)},
+ ),
# A couple more complex combinations
(
"red",
- "1 failed, 2 passed, 3 xfailed",
+ [
+ ("1 failed", {"bold": True, "red": True}),
+ ("2 passed", {"bold": False, "green": True}),
+ ("3 xfailed", {"bold": False, "yellow": True}),
+ ],
{"passed": (1, 2), "failed": (1,), "xfailed": (1, 2, 3)},
),
(
"green",
- "1 passed, 2 skipped, 3 deselected, 2 xfailed",
+ [
+ ("1 passed", {"bold": True, "green": True}),
+ ("2 skipped", {"bold": False, "yellow": True}),
+ ("3 deselected", {"bold": False, "yellow": True}),
+ ("2 xfailed", {"bold": False, "yellow": True}),
+ ],
{
"passed": (1,),
"skipped": (1, 2),
@@ -1314,11 +1413,11 @@ def test_skip_counting_towards_summary():
r1 = DummyReport()
r2 = DummyReport()
res = build_summary_stats_line({"failed": (r1, r2)})
- assert res == ("2 failed", "red")
+ assert res == ([("2 failed", {"bold": True, "red": True})], "red")
r1.count_towards_summary = False
res = build_summary_stats_line({"failed": (r1, r2)})
- assert res == ("1 failed", "red")
+ assert res == ([("1 failed", {"bold": True, "red": True})], "red")
class TestClassicOutputStyle:
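
The parametrization and the two asserts above encode the new return shape of build_summary_stats_line: a list of (text, markup) pairs plus the overall color, instead of one pre-joined string. A hedged sketch of consuming it, assuming it remains importable from _pytest.terminal:

    from _pytest.terminal import build_summary_stats_line

    parts, main_color = build_summary_stats_line({"failed": (1,), "passed": (1,)})
    assert main_color == "red"
    for text, markup in parts:
        # markup holds TerminalWriter flags, e.g. {"bold": True, "red": True}.
        print(text, markup)
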
@@ -1403,7 +1502,7 @@ class TestProgressOutputStyle:
"""
)
output = testdir.runpytest()
- assert "ZeroDivisionError" not in output.stdout.str()
+ output.stdout.no_fnmatch_line("*ZeroDivisionError*")
output.stdout.fnmatch_lines(["=* 2 passed in *="])
def test_normal(self, many_tests_files, testdir):
@@ -1416,6 +1515,43 @@ class TestProgressOutputStyle:
]
)
+ def test_colored_progress(self, testdir, monkeypatch):
+ monkeypatch.setenv("PY_COLORS", "1")
+ testdir.makepyfile(
+ test_bar="""
+ import pytest
+ @pytest.mark.parametrize('i', range(10))
+ def test_bar(i): pass
+ """,
+ test_foo="""
+ import pytest
+ import warnings
+ @pytest.mark.parametrize('i', range(5))
+ def test_foo(i):
+ warnings.warn(DeprecationWarning("collection"))
+ pass
+ """,
+ test_foobar="""
+ import pytest
+ @pytest.mark.parametrize('i', range(5))
+ def test_foobar(i): raise ValueError()
+ """,
+ )
+ output = testdir.runpytest()
+ output.stdout.re_match_lines(
+ [
+ r"test_bar.py ({green}\.{reset}){{10}}{green} \s+ \[ 50%\]{reset}".format(
+ green=GREEN, reset=RESET
+ ),
+ r"test_foo.py ({green}\.{reset}){{5}}{yellow} \s+ \[ 75%\]{reset}".format(
+ green=GREEN, reset=RESET, yellow=YELLOW
+ ),
+ r"test_foobar.py ({red}F{reset}){{5}}{red} \s+ \[100%\]{reset}".format(
+ reset=RESET, red=RED
+ ),
+ ]
+ )
+
def test_count(self, many_tests_files, testdir):
testdir.makeini(
"""
@@ -1495,7 +1631,7 @@ class TestProgressOutputStyle:
)
output = testdir.runpytest("--capture=no")
- assert "%]" not in output.stdout.str()
+ output.stdout.no_fnmatch_line("*%]*")
class TestProgressWithTeardown:
@@ -1696,3 +1832,20 @@ def test_format_session_duration(seconds, expected):
from _pytest.terminal import format_session_duration
assert format_session_duration(seconds) == expected
+
+
+def test_collecterror(testdir):
+ p1 = testdir.makepyfile("raise SyntaxError()")
+ result = testdir.runpytest("-ra", str(p1))
+ result.stdout.fnmatch_lines(
+ [
+ "collected 0 items / 1 error",
+ "*= ERRORS =*",
+ "*_ ERROR collecting test_collecterror.py _*",
+ "E SyntaxError: *",
+ "*= short test summary info =*",
+ "ERROR test_collecterror.py",
+ "*! Interrupted: 1 error during collection !*",
+ "*= 1 error in *",
+ ]
+ )
diff --git a/testing/test_tmpdir.py b/testing/test_tmpdir.py
index 0ebed22ac..eb1c1f300 100644
--- a/testing/test_tmpdir.py
+++ b/testing/test_tmpdir.py
@@ -258,7 +258,7 @@ class TestNumberedDir:
registry = []
register_cleanup_lock_removal(lock, register=registry.append)
- cleanup_func, = registry
+ (cleanup_func,) = registry
assert lock.is_file()
@@ -388,11 +388,21 @@ class TestRmRf:
assert not on_rm_rf_error(None, str(fn), exc_info, start_path=tmp_path)
# unknown function
- with pytest.warns(pytest.PytestWarning):
+ with pytest.warns(
+ pytest.PytestWarning,
+ match=r"^\(rm_rf\) unknown function None when removing .*foo.txt:\nNone: ",
+ ):
exc_info = (None, PermissionError(), None)
on_rm_rf_error(None, str(fn), exc_info, start_path=tmp_path)
assert fn.is_file()
+ # ignored function
+ with pytest.warns(None) as warninfo:
+ exc_info = (None, PermissionError(), None)
+ on_rm_rf_error(os.open, str(fn), exc_info, start_path=tmp_path)
+ assert fn.is_file()
+ assert not [x.message for x in warninfo]
+
exc_info = (None, PermissionError(), None)
on_rm_rf_error(os.unlink, str(fn), exc_info, start_path=tmp_path)
assert not fn.is_file()
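
on_rm_rf_error is the shutil.rmtree error handler behind _pytest.pathlib.rm_rf: it chmods-and-retries for known cleanup functions (os.unlink, os.rmdir), silently ignores os.open (which rmtree reports when it cannot open a directory), and warns for anything else. A hedged sketch of the public entry point:

    import tempfile
    from pathlib import Path
    from _pytest.pathlib import rm_rf

    tmp = Path(tempfile.mkdtemp())
    (tmp / "f.txt").write_text("x")
    rm_rf(tmp)  # retries permission errors via on_rm_rf_error
    assert not tmp.exists()
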
diff --git a/testing/test_unittest.py b/testing/test_unittest.py
index 9b1b688ff..885178402 100644
--- a/testing/test_unittest.py
+++ b/testing/test_unittest.py
@@ -233,7 +233,7 @@ def test_unittest_skip_issue148(testdir):
def test_method_and_teardown_failing_reporting(testdir):
testdir.makepyfile(
"""
- import unittest, pytest
+ import unittest
class TC(unittest.TestCase):
def tearDown(self):
assert 0, "down1"
@@ -270,7 +270,7 @@ def test_setup_failure_is_shown(testdir):
result = testdir.runpytest("-s")
assert result.ret == 1
result.stdout.fnmatch_lines(["*setUp*", "*assert 0*down1*", "*1 failed*"])
- assert "never42" not in result.stdout.str()
+ result.stdout.no_fnmatch_line("*never42*")
def test_setup_setUpClass(testdir):
@@ -342,7 +342,7 @@ def test_testcase_adderrorandfailure_defers(testdir, type):
% (type, type)
)
result = testdir.runpytest()
- assert "should not raise" not in result.stdout.str()
+ result.stdout.no_fnmatch_line("*should not raise*")
@pytest.mark.parametrize("type", ["Error", "Failure"])
@@ -383,7 +383,7 @@ def test_testcase_custom_exception_info(testdir, type):
def test_testcase_totally_incompatible_exception_info(testdir):
- item, = testdir.getitems(
+ (item,) = testdir.getitems(
"""
from unittest import TestCase
class MyTestCase(TestCase):
@@ -530,19 +530,31 @@ class TestTrialUnittest:
# will crash both at test time and at teardown
"""
)
- result = testdir.runpytest()
+ # Ignore DeprecationWarning (for `cmp`) from attrs through twisted,
+ # for stable test results.
+ result = testdir.runpytest(
+ "-vv", "-oconsole_output_style=classic", "-W", "ignore::DeprecationWarning"
+ )
result.stdout.fnmatch_lines(
[
+ "test_trial_error.py::TC::test_four FAILED",
+ "test_trial_error.py::TC::test_four ERROR",
+ "test_trial_error.py::TC::test_one FAILED",
+ "test_trial_error.py::TC::test_three FAILED",
+ "test_trial_error.py::TC::test_two FAILED",
"*ERRORS*",
+ "*_ ERROR at teardown of TC.test_four _*",
"*DelayedCalls*",
- "*test_four*",
+ "*= FAILURES =*",
+ "*_ TC.test_four _*",
"*NameError*crash*",
- "*test_one*",
+ "*_ TC.test_one _*",
"*NameError*crash*",
- "*test_three*",
+ "*_ TC.test_three _*",
"*DelayedCalls*",
- "*test_two*",
- "*crash*",
+ "*_ TC.test_two _*",
+ "*NameError*crash*",
+ "*= 4 failed, 1 error in *",
]
)
@@ -684,7 +696,7 @@ def test_unittest_not_shown_in_traceback(testdir):
"""
)
res = testdir.runpytest()
- assert "failUnlessEqual" not in res.stdout.str()
+ res.stdout.no_fnmatch_line("*failUnlessEqual*")
def test_unorderable_types(testdir):
@@ -703,7 +715,7 @@ def test_unorderable_types(testdir):
"""
)
result = testdir.runpytest()
- assert "TypeError" not in result.stdout.str()
+ result.stdout.no_fnmatch_line("*TypeError*")
assert result.ret == ExitCode.NO_TESTS_COLLECTED
@@ -1020,7 +1032,7 @@ def test_testcase_handles_init_exceptions(testdir):
)
result = testdir.runpytest()
assert "should raise this exception" in result.stdout.str()
- assert "ERROR at teardown of MyTestCase.test_hello" not in result.stdout.str()
+ result.stdout.no_fnmatch_line("*ERROR at teardown of MyTestCase.test_hello*")
def test_error_message_with_parametrized_fixtures(testdir):
diff --git a/testing/test_warnings.py b/testing/test_warnings.py
index 077636c52..c4af14dac 100644
--- a/testing/test_warnings.py
+++ b/testing/test_warnings.py
@@ -142,7 +142,7 @@ def test_unicode(testdir, pyfile_with_warnings):
[
"*== %s ==*" % WARNINGS_SUMMARY_HEADER,
"*test_unicode.py:7: UserWarning: \u6d4b\u8bd5*",
- "* 1 passed, 1 warnings*",
+ "* 1 passed, 1 warning*",
]
)
@@ -201,7 +201,7 @@ def test_filterwarnings_mark(testdir, default_config):
"""
)
result = testdir.runpytest("-W always" if default_config == "cmdline" else "")
- result.stdout.fnmatch_lines(["*= 1 failed, 2 passed, 1 warnings in *"])
+ result.stdout.fnmatch_lines(["*= 1 failed, 2 passed, 1 warning in *"])
def test_non_string_warning_argument(testdir):
@@ -216,7 +216,7 @@ def test_non_string_warning_argument(testdir):
"""
)
result = testdir.runpytest("-W", "always")
- result.stdout.fnmatch_lines(["*= 1 passed, 1 warnings in *"])
+ result.stdout.fnmatch_lines(["*= 1 passed, 1 warning in *"])
def test_filterwarnings_mark_registration(testdir):
@@ -302,7 +302,7 @@ def test_collection_warnings(testdir):
"*== %s ==*" % WARNINGS_SUMMARY_HEADER,
" *collection_warnings.py:3: UserWarning: collection warning",
' warnings.warn(UserWarning("collection warning"))',
- "* 1 passed, 1 warnings*",
+ "* 1 passed, 1 warning*",
]
)
@@ -358,7 +358,7 @@ def test_hide_pytest_internal_warnings(testdir, ignore_pytest_warnings):
[
"*== %s ==*" % WARNINGS_SUMMARY_HEADER,
"*test_hide_pytest_internal_warnings.py:4: PytestWarning: some internal warning",
- "* 1 passed, 1 warnings *",
+ "* 1 passed, 1 warning *",
]
)
@@ -476,7 +476,7 @@ class TestDeprecationWarningsByDefault:
[
"*== %s ==*" % WARNINGS_SUMMARY_HEADER,
"*test_hidden_by_mark.py:3: DeprecationWarning: collection",
- "* 1 passed, 1 warnings*",
+ "* 1 passed, 1 warning*",
]
)
@@ -605,6 +605,7 @@ def test_warnings_checker_twice():
warnings.warn("Message B", UserWarning)
+@pytest.mark.filterwarnings("ignore::pytest.PytestExperimentalApiWarning")
@pytest.mark.filterwarnings("always")
def test_group_warnings_by_message(testdir):
testdir.copy_example("warnings/test_group_warnings_by_message.py")
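
Across this file the expected summaries switch to grammatically correct pluralization ("1 warning" / "2 warnings"), matching the "error"/"errors" fixes above. The rule the new expectations encode amounts to the following (illustrative only, not pytest's internal helper):

    def plural(n, noun):
        # "1 warning" but "2 warnings"; same for "error"/"errors".
        return "{} {}{}".format(n, noun, "" if n == 1 else "s")

    assert plural(1, "warning") == "1 warning"
    assert plural(2, "error") == "2 errors"
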