Diffstat (limited to 'testing')
 testing/acceptance_test.py         |  24
 testing/deprecated_test.py         |  76
 testing/logging/test_reporting.py  | 186
 testing/python/approx.py           |  22
 testing/python/fixture.py          | 217
 testing/python/metafunc.py         |  10
 testing/test_cacheprovider.py      | 149
 testing/test_doctest.py            |  21
 testing/test_junitxml.py           |  39
 testing/test_mark.py               |  17
 testing/test_pdb.py                |  36
 testing/test_resultlog.py          |   2
 testing/test_session.py            |  44
 testing/test_skipping.py           |  40
 testing/test_terminal.py           | 101
 15 files changed, 931 insertions(+), 53 deletions(-)
diff --git a/testing/acceptance_test.py b/testing/acceptance_test.py
index 36b9536f3..89a44911f 100644
--- a/testing/acceptance_test.py
+++ b/testing/acceptance_test.py
@@ -964,3 +964,27 @@ def test_fixture_values_leak(testdir):
""")
result = testdir.runpytest()
result.stdout.fnmatch_lines(['* 2 passed *'])
+
+
+def test_fixture_order_respects_scope(testdir):
+ """Ensure that fixtures are created according to scope order, regression test for #2405
+ """
+ testdir.makepyfile('''
+ import pytest
+
+ data = {}
+
+ @pytest.fixture(scope='module')
+ def clean_data():
+ data.clear()
+
+ @pytest.fixture(autouse=True)
+ def add_data():
+ data.update(value=True)
+
+ @pytest.mark.usefixtures('clean_data')
+ def test_value():
+ assert data.get('value')
+ ''')
+ result = testdir.runpytest()
+ assert result.ret == 0
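
The regression test above pins down the #2405 ordering rule: a module-scoped fixture pulled in via usefixtures is set up before function-scoped autouse fixtures. A minimal standalone sketch of the same rule (hypothetical file and fixture names, not part of this patch):

import pytest

ORDER = []

@pytest.fixture(scope='module')
def m():
    ORDER.append('m (module scope)')

@pytest.fixture(autouse=True)
def f():
    ORDER.append('f (function scope, autouse)')

@pytest.mark.usefixtures('m')
def test_scope_order():
    # higher-scoped fixtures are instantiated first, regardless of how they are requested
    assert ORDER == ['m (module scope)', 'f (function scope, autouse)']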
diff --git a/testing/deprecated_test.py b/testing/deprecated_test.py
index 92ec029d4..cb66472c9 100644
--- a/testing/deprecated_test.py
+++ b/testing/deprecated_test.py
@@ -48,6 +48,15 @@ def test_pytest_setup_cfg_deprecated(testdir):
result.stdout.fnmatch_lines(['*pytest*section in setup.cfg files is deprecated*use*tool:pytest*instead*'])
+def test_pytest_custom_cfg_deprecated(testdir):
+ testdir.makefile('.cfg', custom='''
+ [pytest]
+ addopts = --verbose
+ ''')
+ result = testdir.runpytest("-c", "custom.cfg")
+ result.stdout.fnmatch_lines(['*pytest*section in custom.cfg files is deprecated*use*tool:pytest*instead*'])
+
+
def test_str_args_deprecated(tmpdir, testdir):
"""Deprecate passing strings to pytest.main(). Scheduled for removal in pytest-4.0."""
from _pytest.main import EXIT_NOTESTSCOLLECTED
@@ -125,3 +134,70 @@ def test_pytest_catchlog_deprecated(testdir, plugin):
"*pytest-*log plugin has been merged into the core*",
"*1 passed, 1 warnings*",
])
+
+
+def test_pytest_plugins_in_non_top_level_conftest_deprecated(testdir):
+ from _pytest.deprecated import PYTEST_PLUGINS_FROM_NON_TOP_LEVEL_CONFTEST
+ subdirectory = testdir.tmpdir.join("subdirectory")
+ subdirectory.mkdir()
+ # create the inner conftest with makeconftest and then move it to the subdirectory
+ testdir.makeconftest("""
+ pytest_plugins=['capture']
+ """)
+ testdir.tmpdir.join("conftest.py").move(subdirectory.join("conftest.py"))
+ # make the top level conftest
+ testdir.makeconftest("""
+ import warnings
+ warnings.filterwarnings('always', category=DeprecationWarning)
+ """)
+ testdir.makepyfile("""
+ def test_func():
+ pass
+ """)
+ res = testdir.runpytest_subprocess()
+ assert res.ret == 0
+ res.stderr.fnmatch_lines('*' + str(PYTEST_PLUGINS_FROM_NON_TOP_LEVEL_CONFTEST).splitlines()[0])
+
+
+def test_pytest_plugins_in_non_top_level_conftest_deprecated_no_top_level_conftest(testdir):
+ from _pytest.deprecated import PYTEST_PLUGINS_FROM_NON_TOP_LEVEL_CONFTEST
+ subdirectory = testdir.tmpdir.join('subdirectory')
+ subdirectory.mkdir()
+ testdir.makeconftest("""
+ import warnings
+ warnings.filterwarnings('always', category=DeprecationWarning)
+ pytest_plugins=['capture']
+ """)
+ testdir.tmpdir.join("conftest.py").move(subdirectory.join("conftest.py"))
+
+ testdir.makepyfile("""
+ def test_func():
+ pass
+ """)
+
+ res = testdir.runpytest_subprocess()
+ assert res.ret == 0
+ res.stderr.fnmatch_lines('*' + str(PYTEST_PLUGINS_FROM_NON_TOP_LEVEL_CONFTEST).splitlines()[0])
+
+
+def test_pytest_plugins_in_non_top_level_conftest_deprecated_no_false_positives(testdir):
+ from _pytest.deprecated import PYTEST_PLUGINS_FROM_NON_TOP_LEVEL_CONFTEST
+ subdirectory = testdir.tmpdir.join('subdirectory')
+ subdirectory.mkdir()
+ testdir.makeconftest("""
+ pass
+ """)
+ testdir.tmpdir.join("conftest.py").move(subdirectory.join("conftest.py"))
+
+ testdir.makeconftest("""
+ import warnings
+ warnings.filterwarnings('always', category=DeprecationWarning)
+ pytest_plugins=['capture']
+ """)
+ testdir.makepyfile("""
+ def test_func():
+ pass
+ """)
+ res = testdir.runpytest_subprocess()
+ assert res.ret == 0
+ assert str(PYTEST_PLUGINS_FROM_NON_TOP_LEVEL_CONFTEST).splitlines()[0] not in res.stderr.str()
diff --git a/testing/logging/test_reporting.py b/testing/logging/test_reporting.py
index 8dfe04ad9..699df0e60 100644
--- a/testing/logging/test_reporting.py
+++ b/testing/logging/test_reporting.py
@@ -1,4 +1,5 @@
# -*- coding: utf-8 -*-
+import re
import os
import six
@@ -48,6 +49,66 @@ def test_messages_logged(testdir):
'text going to stderr'])
+def test_root_logger_affected(testdir):
+ testdir.makepyfile("""
+ import logging
+ logger = logging.getLogger()
+ def test_foo():
+ logger.info('info text ' + 'going to logger')
+ logger.warning('warning text ' + 'going to logger')
+ logger.error('error text ' + 'going to logger')
+
+ assert 0
+ """)
+ log_file = testdir.tmpdir.join('pytest.log').strpath
+ result = testdir.runpytest('--log-level=ERROR', '--log-file=pytest.log')
+ assert result.ret == 1
+
+ # the capture log calls in the stdout section only contain the
+ # logger.error msg, because --log-level=ERROR
+ result.stdout.fnmatch_lines(['*error text going to logger*'])
+ with pytest.raises(pytest.fail.Exception):
+ result.stdout.fnmatch_lines(['*warning text going to logger*'])
+ with pytest.raises(pytest.fail.Exception):
+ result.stdout.fnmatch_lines(['*info text going to logger*'])
+
+ # the log file should contain the warning and the error log messages and
+ # not the info one, because the default level of the root logger is
+ # WARNING.
+ assert os.path.isfile(log_file)
+ with open(log_file) as rfh:
+ contents = rfh.read()
+ assert "info text going to logger" not in contents
+ assert "warning text going to logger" in contents
+ assert "error text going to logger" in contents
+
+
+def test_log_cli_level_log_level_interaction(testdir):
+ testdir.makepyfile("""
+ import logging
+ logger = logging.getLogger()
+
+ def test_foo():
+ logger.debug('debug text ' + 'going to logger')
+ logger.info('info text ' + 'going to logger')
+ logger.warning('warning text ' + 'going to logger')
+ logger.error('error text ' + 'going to logger')
+ assert 0
+ """)
+
+ result = testdir.runpytest('--log-cli-level=INFO', '--log-level=ERROR')
+ assert result.ret == 1
+
+ result.stdout.fnmatch_lines([
+ '*-- live log call --*',
+ '*INFO*info text going to logger',
+ '*WARNING*warning text going to logger',
+ '*ERROR*error text going to logger',
+ '=* 1 failed in *=',
+ ])
+ assert 'DEBUG' not in result.stdout.str()
+
+
def test_setup_logging(testdir):
testdir.makepyfile('''
import logging
@@ -60,7 +121,7 @@ def test_setup_logging(testdir):
def test_foo():
logger.info('text going to logger from call')
assert False
- ''')
+ ''')
result = testdir.runpytest('--log-level=INFO')
assert result.ret == 1
result.stdout.fnmatch_lines(['*- Captured *log setup -*',
@@ -161,6 +222,7 @@ def test_log_cli_enabled_disabled(testdir, enabled):
if enabled:
result.stdout.fnmatch_lines([
'test_log_cli_enabled_disabled.py::test_log_cli ',
+ '*-- live log call --*',
'test_log_cli_enabled_disabled.py* CRITICAL critical message logged by test',
'PASSED*',
])
@@ -226,8 +288,20 @@ def test_log_cli_default_level_multiple_tests(testdir, request):
def test_log_cli_default_level_sections(testdir, request):
- """Check that with live logging enable we are printing the correct headers during setup/call/teardown."""
+ """Check that with live logging enable we are printing the correct headers during
+ start/setup/call/teardown/finish."""
filename = request.node.name + '.py'
+ testdir.makeconftest('''
+ import pytest
+ import logging
+
+ def pytest_runtest_logstart():
+ logging.warning('>>>>> START >>>>>')
+
+ def pytest_runtest_logfinish():
+ logging.warning('<<<<< END <<<<<<<')
+ ''')
+
testdir.makepyfile('''
import pytest
import logging
@@ -252,6 +326,8 @@ def test_log_cli_default_level_sections(testdir, request):
result = testdir.runpytest()
result.stdout.fnmatch_lines([
'{}::test_log_1 '.format(filename),
+ '*-- live log start --*',
+ '*WARNING* >>>>> START >>>>>*',
'*-- live log setup --*',
'*WARNING*log message from setup of test_log_1*',
'*-- live log call --*',
@@ -259,8 +335,12 @@ def test_log_cli_default_level_sections(testdir, request):
'PASSED *50%*',
'*-- live log teardown --*',
'*WARNING*log message from teardown of test_log_1*',
+ '*-- live log finish --*',
+ '*WARNING* <<<<< END <<<<<<<*',
'{}::test_log_2 '.format(filename),
+ '*-- live log start --*',
+ '*WARNING* >>>>> START >>>>>*',
'*-- live log setup --*',
'*WARNING*log message from setup of test_log_2*',
'*-- live log call --*',
@@ -268,6 +348,8 @@ def test_log_cli_default_level_sections(testdir, request):
'PASSED *100%*',
'*-- live log teardown --*',
'*WARNING*log message from teardown of test_log_2*',
+ '*-- live log finish --*',
+ '*WARNING* <<<<< END <<<<<<<*',
'=* 2 passed in *=',
])
@@ -326,6 +408,64 @@ def test_live_logs_unknown_sections(testdir, request):
])
+def test_sections_single_new_line_after_test_outcome(testdir, request):
+ """Check that only a single new line is written between log messages during
+ teardown/finish."""
+ filename = request.node.name + '.py'
+ testdir.makeconftest('''
+ import pytest
+ import logging
+
+ def pytest_runtest_logstart():
+ logging.warning('>>>>> START >>>>>')
+
+ def pytest_runtest_logfinish():
+ logging.warning('<<<<< END <<<<<<<')
+ logging.warning('<<<<< END <<<<<<<')
+ ''')
+
+ testdir.makepyfile('''
+ import pytest
+ import logging
+
+ @pytest.fixture
+ def fix(request):
+ logging.warning("log message from setup of {}".format(request.node.name))
+ yield
+ logging.warning("log message from teardown of {}".format(request.node.name))
+ logging.warning("log message from teardown of {}".format(request.node.name))
+
+ def test_log_1(fix):
+ logging.warning("log message from test_log_1")
+ ''')
+ testdir.makeini('''
+ [pytest]
+ log_cli=true
+ ''')
+
+ result = testdir.runpytest()
+ result.stdout.fnmatch_lines([
+ '{}::test_log_1 '.format(filename),
+ '*-- live log start --*',
+ '*WARNING* >>>>> START >>>>>*',
+ '*-- live log setup --*',
+ '*WARNING*log message from setup of test_log_1*',
+ '*-- live log call --*',
+ '*WARNING*log message from test_log_1*',
+ 'PASSED *100%*',
+ '*-- live log teardown --*',
+ '*WARNING*log message from teardown of test_log_1*',
+ '*-- live log finish --*',
+ '*WARNING* <<<<< END <<<<<<<*',
+ '*WARNING* <<<<< END <<<<<<<*',
+ '=* 1 passed in *=',
+ ])
+ assert re.search(r'(.+)live log teardown(.+)\n(.+)WARNING(.+)\n(.+)WARNING(.+)',
+ result.stdout.str(), re.MULTILINE) is not None
+ assert re.search(r'(.+)live log finish(.+)\n(.+)WARNING(.+)\n(.+)WARNING(.+)',
+ result.stdout.str(), re.MULTILINE) is not None
+
+
def test_log_cli_level(testdir):
# Default log file level
testdir.makepyfile('''
@@ -399,6 +539,48 @@ def test_log_cli_ini_level(testdir):
assert result.ret == 0
+@pytest.mark.parametrize('cli_args', ['',
+ '--log-level=WARNING',
+ '--log-file-level=WARNING',
+ '--log-cli-level=WARNING'])
+def test_log_cli_auto_enable(testdir, request, cli_args):
+ """Check that live logs are enabled if --log-level or --log-cli-level is passed on the CLI.
+ It should not be auto enabled if the same configs are set on the INI file.
+ """
+ testdir.makepyfile('''
+ import pytest
+ import logging
+
+ def test_log_1():
+ logging.info("log message from test_log_1 not to be shown")
+ logging.warning("log message from test_log_1")
+
+ ''')
+ testdir.makeini('''
+ [pytest]
+ log_level=INFO
+ log_cli_level=INFO
+ ''')
+
+ result = testdir.runpytest(cli_args)
+ if cli_args == '--log-cli-level=WARNING':
+ result.stdout.fnmatch_lines([
+ '*::test_log_1 ',
+ '*-- live log call --*',
+ '*WARNING*log message from test_log_1*',
+ 'PASSED *100%*',
+ '=* 1 passed in *=',
+ ])
+ assert 'INFO' not in result.stdout.str()
+ else:
+ result.stdout.fnmatch_lines([
+ '*test_log_cli_auto_enable*100%*',
+ '=* 1 passed in *=',
+ ])
+ assert 'INFO' not in result.stdout.str()
+ assert 'WARNING' not in result.stdout.str()
+
+
def test_log_file_cli(testdir):
# Default log file level
testdir.makepyfile('''
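
For context on the assertions in this file: log_cli=true in the ini file (or --log-cli-level on the command line) turns on the live log sections printed to the terminal, while --log-level and --log-file-level only affect what is captured and written to --log-file. A condensed pytester-style sketch of that interaction (illustrative, assuming the testdir fixture):

def test_live_log_sketch(testdir):
    testdir.makeini("""
        [pytest]
        log_cli = true
    """)
    testdir.makepyfile("""
        import logging
        def test_example():
            logging.getLogger().warning('warning text going to logger')
    """)
    # WARNING and above show up live; lower levels are filtered out
    result = testdir.runpytest('--log-cli-level=WARNING')
    result.stdout.fnmatch_lines([
        '*-- live log call --*',
        '*WARNING*warning text going to logger*',
    ])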
diff --git a/testing/python/approx.py b/testing/python/approx.py
index 341e5fcff..9ca21bdf8 100644
--- a/testing/python/approx.py
+++ b/testing/python/approx.py
@@ -391,3 +391,25 @@ class TestApprox(object):
"""
with pytest.raises(TypeError):
op(1, approx(1, rel=1e-6, abs=1e-12))
+
+ def test_numpy_array_with_scalar(self):
+ np = pytest.importorskip('numpy')
+
+ actual = np.array([1 + 1e-7, 1 - 1e-8])
+ expected = 1.0
+
+ assert actual == approx(expected, rel=5e-7, abs=0)
+ assert actual != approx(expected, rel=5e-8, abs=0)
+ assert approx(expected, rel=5e-7, abs=0) == actual
+ assert approx(expected, rel=5e-8, abs=0) != actual
+
+ def test_numpy_scalar_with_array(self):
+ np = pytest.importorskip('numpy')
+
+ actual = 1.0
+ expected = np.array([1 + 1e-7, 1 - 1e-8])
+
+ assert actual == approx(expected, rel=5e-7, abs=0)
+ assert actual != approx(expected, rel=5e-8, abs=0)
+ assert approx(expected, rel=5e-7, abs=0) == actual
+ assert approx(expected, rel=5e-8, abs=0) != actual
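
The two tests above make approx symmetric when a numpy array is compared against a plain scalar: every element is checked against the scalar within the given tolerances, whichever side the approx object sits on. The tolerance semantics themselves are the usual ones:

from pytest import approx

# default tolerances are rel=1e-6 and abs=1e-12
assert 0.1 + 0.2 == approx(0.3)

# tolerances can be set explicitly; a 1e-4 error passes at rel=1e-3 but not at rel=1e-6
assert 1.0001 == approx(1, rel=1e-3)
assert 1.0001 != approx(1, rel=1e-6)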
diff --git a/testing/python/fixture.py b/testing/python/fixture.py
index 8638e361a..c558ea3cf 100644
--- a/testing/python/fixture.py
+++ b/testing/python/fixture.py
@@ -3,7 +3,7 @@ from textwrap import dedent
import _pytest._code
import pytest
from _pytest.pytester import get_public_names
-from _pytest.fixtures import FixtureLookupError
+from _pytest.fixtures import FixtureLookupError, FixtureRequest
from _pytest import fixtures
@@ -1781,6 +1781,8 @@ class TestAutouseManagement(object):
import pytest
values = []
def pytest_generate_tests(metafunc):
+ if metafunc.cls is None:
+ assert metafunc.function is test_finish
if metafunc.cls is not None:
metafunc.parametrize("item", [1,2], scope="class")
class TestClass(object):
@@ -1798,7 +1800,7 @@ class TestAutouseManagement(object):
assert values == ["setup-1", "step1-1", "step2-1", "teardown-1",
"setup-2", "step1-2", "step2-2", "teardown-2",]
""")
- reprec = testdir.inline_run()
+ reprec = testdir.inline_run('-s')
reprec.assertoutcome(passed=5)
def test_ordering_autouse_before_explicit(self, testdir):
@@ -2281,19 +2283,19 @@ class TestFixtureMarker(object):
pass
""")
result = testdir.runpytest("-vs")
- result.stdout.fnmatch_lines("""
- test_class_ordering.py::TestClass2::test_1[1-a] PASSED
- test_class_ordering.py::TestClass2::test_1[2-a] PASSED
- test_class_ordering.py::TestClass2::test_2[1-a] PASSED
- test_class_ordering.py::TestClass2::test_2[2-a] PASSED
- test_class_ordering.py::TestClass2::test_1[1-b] PASSED
- test_class_ordering.py::TestClass2::test_1[2-b] PASSED
- test_class_ordering.py::TestClass2::test_2[1-b] PASSED
- test_class_ordering.py::TestClass2::test_2[2-b] PASSED
- test_class_ordering.py::TestClass::test_3[1-a] PASSED
- test_class_ordering.py::TestClass::test_3[2-a] PASSED
- test_class_ordering.py::TestClass::test_3[1-b] PASSED
- test_class_ordering.py::TestClass::test_3[2-b] PASSED
+ result.stdout.re_match_lines(r"""
+ test_class_ordering.py::TestClass2::test_1\[a-1\] PASSED
+ test_class_ordering.py::TestClass2::test_1\[a-2\] PASSED
+ test_class_ordering.py::TestClass2::test_2\[a-1\] PASSED
+ test_class_ordering.py::TestClass2::test_2\[a-2\] PASSED
+ test_class_ordering.py::TestClass2::test_1\[b-1\] PASSED
+ test_class_ordering.py::TestClass2::test_1\[b-2\] PASSED
+ test_class_ordering.py::TestClass2::test_2\[b-1\] PASSED
+ test_class_ordering.py::TestClass2::test_2\[b-2\] PASSED
+ test_class_ordering.py::TestClass::test_3\[a-1\] PASSED
+ test_class_ordering.py::TestClass::test_3\[a-2\] PASSED
+ test_class_ordering.py::TestClass::test_3\[b-1\] PASSED
+ test_class_ordering.py::TestClass::test_3\[b-2\] PASSED
""")
def test_parametrize_separated_order_higher_scope_first(self, testdir):
@@ -3245,3 +3247,188 @@ def test_pytest_fixture_setup_and_post_finalizer_hook(testdir):
"*TESTS finalizer hook called for my_fixture from test_func*",
"*ROOT finalizer hook called for my_fixture from test_func*",
])
+
+
+class TestScopeOrdering(object):
+ """Class of tests that ensure fixtures are ordered based on their scopes (#2405)"""
+
+ @pytest.mark.parametrize('use_mark', [True, False])
+ def test_func_closure_module_auto(self, testdir, use_mark):
+ """Semantically identical to the example posted in #2405 when ``use_mark=True``"""
+ testdir.makepyfile("""
+ import pytest
+
+ @pytest.fixture(scope='module', autouse={autouse})
+ def m1(): pass
+
+ if {use_mark}:
+ pytestmark = pytest.mark.usefixtures('m1')
+
+ @pytest.fixture(scope='function', autouse=True)
+ def f1(): pass
+
+ def test_func(m1):
+ pass
+ """.format(autouse=not use_mark, use_mark=use_mark))
+ items, _ = testdir.inline_genitems()
+ request = FixtureRequest(items[0])
+ assert request.fixturenames == 'm1 f1'.split()
+
+ def test_func_closure_with_native_fixtures(self, testdir, monkeypatch):
+ """Sanity check that verifies the order returned by the closures and the actual fixture execution order:
+ The execution order may differ because of fixture inter-dependencies.
+ """
+ monkeypatch.setattr(pytest, 'FIXTURE_ORDER', [], raising=False)
+ testdir.makepyfile("""
+ import pytest
+
+ FIXTURE_ORDER = pytest.FIXTURE_ORDER
+
+ @pytest.fixture(scope="session")
+ def s1():
+ FIXTURE_ORDER.append('s1')
+
+ @pytest.fixture(scope="module")
+ def m1():
+ FIXTURE_ORDER.append('m1')
+
+ @pytest.fixture(scope='session')
+ def my_tmpdir_factory():
+ FIXTURE_ORDER.append('my_tmpdir_factory')
+
+ @pytest.fixture
+ def my_tmpdir(my_tmpdir_factory):
+ FIXTURE_ORDER.append('my_tmpdir')
+
+ @pytest.fixture
+ def f1(my_tmpdir):
+ FIXTURE_ORDER.append('f1')
+
+ @pytest.fixture
+ def f2():
+ FIXTURE_ORDER.append('f2')
+
+ def test_foo(f1, m1, f2, s1): pass
+ """)
+ items, _ = testdir.inline_genitems()
+ request = FixtureRequest(items[0])
+ # order of fixtures based on their scope and position in the parameter list
+ assert request.fixturenames == 's1 my_tmpdir_factory m1 f1 f2 my_tmpdir'.split()
+ testdir.runpytest()
+ # actual fixture execution differs: dependent fixtures must be created first ("my_tmpdir")
+ assert pytest.FIXTURE_ORDER == 's1 my_tmpdir_factory m1 my_tmpdir f1 f2'.split()
+
+ def test_func_closure_module(self, testdir):
+ testdir.makepyfile("""
+ import pytest
+
+ @pytest.fixture(scope='module')
+ def m1(): pass
+
+ @pytest.fixture(scope='function')
+ def f1(): pass
+
+ def test_func(f1, m1):
+ pass
+ """)
+ items, _ = testdir.inline_genitems()
+ request = FixtureRequest(items[0])
+ assert request.fixturenames == 'm1 f1'.split()
+
+ def test_func_closure_scopes_reordered(self, testdir):
+ """Test ensures that fixtures are ordered by scope regardless of the order of the parameters, although
+ fixtures of same scope keep the declared order
+ """
+ testdir.makepyfile("""
+ import pytest
+
+ @pytest.fixture(scope='session')
+ def s1(): pass
+
+ @pytest.fixture(scope='module')
+ def m1(): pass
+
+ @pytest.fixture(scope='function')
+ def f1(): pass
+
+ @pytest.fixture(scope='function')
+ def f2(): pass
+
+ class Test:
+
+ @pytest.fixture(scope='class')
+ def c1(cls): pass
+
+ def test_func(self, f2, f1, c1, m1, s1):
+ pass
+ """)
+ items, _ = testdir.inline_genitems()
+ request = FixtureRequest(items[0])
+ assert request.fixturenames == 's1 m1 c1 f2 f1'.split()
+
+ def test_func_closure_same_scope_closer_root_first(self, testdir):
+ """Auto-use fixtures of same scope are ordered by closer-to-root first"""
+ testdir.makeconftest("""
+ import pytest
+
+ @pytest.fixture(scope='module', autouse=True)
+ def m_conf(): pass
+ """)
+ testdir.makepyfile(**{
+ 'sub/conftest.py': """
+ import pytest
+
+ @pytest.fixture(scope='module', autouse=True)
+ def m_sub(): pass
+ """,
+ 'sub/test_func.py': """
+ import pytest
+
+ @pytest.fixture(scope='module', autouse=True)
+ def m_test(): pass
+
+ @pytest.fixture(scope='function')
+ def f1(): pass
+
+ def test_func(m_test, f1):
+ pass
+ """})
+ items, _ = testdir.inline_genitems()
+ request = FixtureRequest(items[0])
+ assert request.fixturenames == 'm_conf m_sub m_test f1'.split()
+
+ def test_func_closure_all_scopes_complex(self, testdir):
+ """Complex test involving all scopes and mixing autouse with normal fixtures"""
+ testdir.makeconftest("""
+ import pytest
+
+ @pytest.fixture(scope='session')
+ def s1(): pass
+ """)
+ testdir.makepyfile("""
+ import pytest
+
+ @pytest.fixture(scope='module', autouse=True)
+ def m1(): pass
+
+ @pytest.fixture(scope='module')
+ def m2(s1): pass
+
+ @pytest.fixture(scope='function')
+ def f1(): pass
+
+ @pytest.fixture(scope='function')
+ def f2(): pass
+
+ class Test:
+
+ @pytest.fixture(scope='class', autouse=True)
+ def c1(self):
+ pass
+
+ def test_func(self, f2, f1, m2):
+ pass
+ """)
+ items, _ = testdir.inline_genitems()
+ request = FixtureRequest(items[0])
+ assert request.fixturenames == 's1 m1 m2 c1 f2 f1'.split()
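
All of the TestScopeOrdering tests follow the same pattern: collect the item without running it, then read the scope-ordered closure off a FixtureRequest built for it. The pattern, condensed (FixtureRequest is an internal API, so this helper is illustrative only):

from _pytest.fixtures import FixtureRequest

def fixture_closure(testdir, source):
    # collect (but do not run) the module and return the ordered fixture
    # closure pytest computed for its first test item
    testdir.makepyfile(source)
    items, _ = testdir.inline_genitems()
    return FixtureRequest(items[0]).fixturenames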
diff --git a/testing/python/metafunc.py b/testing/python/metafunc.py
index f2732ef3b..9b70c3305 100644
--- a/testing/python/metafunc.py
+++ b/testing/python/metafunc.py
@@ -1,7 +1,7 @@
# -*- coding: utf-8 -*-
import re
import sys
-
+import attr
import _pytest._code
import py
import pytest
@@ -24,13 +24,19 @@ class TestMetafunc(object):
def __init__(self, names):
self.names_closure = names
+ @attr.s
+ class DefinitionMock(object):
+ obj = attr.ib()
+
names = fixtures.getfuncargnames(func)
fixtureinfo = FixtureInfo(names)
- return python.Metafunc(func, fixtureinfo, config)
+ definition = DefinitionMock(func)
+ return python.Metafunc(definition, fixtureinfo, config)
def test_no_funcargs(self, testdir):
def function():
pass
+
metafunc = self.Metafunc(function)
assert not metafunc.fixturenames
repr(metafunc._calls)
diff --git a/testing/test_cacheprovider.py b/testing/test_cacheprovider.py
index 038fd229e..51e45dd48 100644
--- a/testing/test_cacheprovider.py
+++ b/testing/test_cacheprovider.py
@@ -56,7 +56,7 @@ class TestNewAPI(object):
assert result.ret == 1
result.stdout.fnmatch_lines([
"*could not create cache path*",
- "*1 warnings*",
+ "*2 warnings*",
])
def test_config_cache(self, testdir):
@@ -361,7 +361,7 @@ class TestLastFailed(object):
result = testdir.runpytest('--lf')
result.stdout.fnmatch_lines([
- 'collected 4 items',
+ 'collected 4 items / 2 deselected',
'run-last-failure: rerun previous 2 failures',
'*2 failed, 2 deselected in*',
])
@@ -495,15 +495,15 @@ class TestLastFailed(object):
# Issue #1342
testdir.makepyfile(test_empty='')
testdir.runpytest('-q', '--lf')
- assert not os.path.exists('.pytest_cache')
+ assert not os.path.exists('.pytest_cache/v/cache/lastfailed')
testdir.makepyfile(test_successful='def test_success():\n assert True')
testdir.runpytest('-q', '--lf')
- assert not os.path.exists('.pytest_cache')
+ assert not os.path.exists('.pytest_cache/v/cache/lastfailed')
testdir.makepyfile(test_errored='def test_error():\n assert False')
testdir.runpytest('-q', '--lf')
- assert os.path.exists('.pytest_cache')
+ assert os.path.exists('.pytest_cache/v/cache/lastfailed')
def test_xfail_not_considered_failure(self, testdir):
testdir.makepyfile('''
@@ -603,3 +603,142 @@ class TestLastFailed(object):
result = testdir.runpytest('--last-failed')
result.stdout.fnmatch_lines('*4 passed*')
assert self.get_cached_last_failed(testdir) == []
+
+ def test_lastfailed_no_failures_behavior_all_passed(self, testdir):
+ testdir.makepyfile("""
+ def test_1():
+ assert True
+ def test_2():
+ assert True
+ """)
+ result = testdir.runpytest()
+ result.stdout.fnmatch_lines(["*2 passed*"])
+ result = testdir.runpytest("--lf")
+ result.stdout.fnmatch_lines(["*2 passed*"])
+ result = testdir.runpytest("--lf", "--lfnf", "all")
+ result.stdout.fnmatch_lines(["*2 passed*"])
+ result = testdir.runpytest("--lf", "--lfnf", "none")
+ result.stdout.fnmatch_lines(["*2 desel*"])
+
+ def test_lastfailed_no_failures_behavior_empty_cache(self, testdir):
+ testdir.makepyfile("""
+ def test_1():
+ assert True
+ def test_2():
+ assert False
+ """)
+ result = testdir.runpytest("--lf", "--cache-clear")
+ result.stdout.fnmatch_lines(["*1 failed*1 passed*"])
+ result = testdir.runpytest("--lf", "--cache-clear", "--lfnf", "all")
+ result.stdout.fnmatch_lines(["*1 failed*1 passed*"])
+ result = testdir.runpytest("--lf", "--cache-clear", "--lfnf", "none")
+ result.stdout.fnmatch_lines(["*2 desel*"])
+
+
+class TestNewFirst(object):
+ def test_newfirst_usecase(self, testdir):
+ testdir.makepyfile(**{
+ 'test_1/test_1.py': '''
+ def test_1(): assert 1
+ def test_2(): assert 1
+ def test_3(): assert 1
+ ''',
+ 'test_2/test_2.py': '''
+ def test_1(): assert 1
+ def test_2(): assert 1
+ def test_3(): assert 1
+ '''
+ })
+
+ testdir.tmpdir.join('test_1/test_1.py').setmtime(1)
+
+ result = testdir.runpytest("-v")
+ result.stdout.fnmatch_lines([
+ "*test_1/test_1.py::test_1 PASSED*",
+ "*test_1/test_1.py::test_2 PASSED*",
+ "*test_1/test_1.py::test_3 PASSED*",
+ "*test_2/test_2.py::test_1 PASSED*",
+ "*test_2/test_2.py::test_2 PASSED*",
+ "*test_2/test_2.py::test_3 PASSED*",
+ ])
+
+ result = testdir.runpytest("-v", "--nf")
+
+ result.stdout.fnmatch_lines([
+ "*test_2/test_2.py::test_1 PASSED*",
+ "*test_2/test_2.py::test_2 PASSED*",
+ "*test_2/test_2.py::test_3 PASSED*",
+ "*test_1/test_1.py::test_1 PASSED*",
+ "*test_1/test_1.py::test_2 PASSED*",
+ "*test_1/test_1.py::test_3 PASSED*",
+ ])
+
+ testdir.tmpdir.join("test_1/test_1.py").write(
+ "def test_1(): assert 1\n"
+ "def test_2(): assert 1\n"
+ "def test_3(): assert 1\n"
+ "def test_4(): assert 1\n"
+ )
+ testdir.tmpdir.join('test_1/test_1.py').setmtime(1)
+
+ result = testdir.runpytest("-v", "--nf")
+
+ result.stdout.fnmatch_lines([
+ "*test_1/test_1.py::test_4 PASSED*",
+ "*test_2/test_2.py::test_1 PASSED*",
+ "*test_2/test_2.py::test_2 PASSED*",
+ "*test_2/test_2.py::test_3 PASSED*",
+ "*test_1/test_1.py::test_1 PASSED*",
+ "*test_1/test_1.py::test_2 PASSED*",
+ "*test_1/test_1.py::test_3 PASSED*",
+ ])
+
+ def test_newfirst_parametrize(self, testdir):
+ testdir.makepyfile(**{
+ 'test_1/test_1.py': '''
+ import pytest
+ @pytest.mark.parametrize('num', [1, 2])
+ def test_1(num): assert num
+ ''',
+ 'test_2/test_2.py': '''
+ import pytest
+ @pytest.mark.parametrize('num', [1, 2])
+ def test_1(num): assert num
+ '''
+ })
+
+ testdir.tmpdir.join('test_1/test_1.py').setmtime(1)
+
+ result = testdir.runpytest("-v")
+ result.stdout.fnmatch_lines([
+ "*test_1/test_1.py::test_1[1*",
+ "*test_1/test_1.py::test_1[2*",
+ "*test_2/test_2.py::test_1[1*",
+ "*test_2/test_2.py::test_1[2*"
+ ])
+
+ result = testdir.runpytest("-v", "--nf")
+
+ result.stdout.fnmatch_lines([
+ "*test_2/test_2.py::test_1[1*",
+ "*test_2/test_2.py::test_1[2*",
+ "*test_1/test_1.py::test_1[1*",
+ "*test_1/test_1.py::test_1[2*",
+ ])
+
+ testdir.tmpdir.join("test_1/test_1.py").write(
+ "import pytest\n"
+ "@pytest.mark.parametrize('num', [1, 2, 3])\n"
+ "def test_1(num): assert num\n"
+ )
+ testdir.tmpdir.join('test_1/test_1.py').setmtime(1)
+
+ result = testdir.runpytest("-v", "--nf")
+
+ result.stdout.fnmatch_lines([
+ "*test_1/test_1.py::test_1[3*",
+ "*test_2/test_2.py::test_1[1*",
+ "*test_2/test_2.py::test_1[2*",
+ "*test_1/test_1.py::test_1[1*",
+ "*test_1/test_1.py::test_1[2*",
+ ])
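
Together these tests cover the two new cache-plugin behaviours: --lfnf (--last-failed-no-failures) decides what --lf does when the cache records no failures, and --nf (--new-first) runs tests from recently modified files before the rest. A small illustration of the --lfnf none branch (hypothetical test name):

def test_lfnf_none_deselects_when_nothing_failed(testdir):
    testdir.makepyfile("def test_ok(): pass")
    testdir.runpytest()                       # first run records that nothing failed
    result = testdir.runpytest("--lf", "--lfnf", "none")
    result.stdout.fnmatch_lines(["*1 deselected*"])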
diff --git a/testing/test_doctest.py b/testing/test_doctest.py
index b15067f15..314398395 100644
--- a/testing/test_doctest.py
+++ b/testing/test_doctest.py
@@ -756,6 +756,27 @@ class TestDoctestSkips(object):
reprec = testdir.inline_run("--doctest-modules")
reprec.assertoutcome(passed=0, skipped=0)
+ def test_continue_on_failure(self, testdir):
+ testdir.maketxtfile(test_something="""
+ >>> i = 5
+ >>> def foo():
+ ... raise ValueError('error1')
+ >>> foo()
+ >>> i
+ >>> i + 2
+ 7
+ >>> i + 1
+ """)
+ result = testdir.runpytest("--doctest-modules", "--doctest-continue-on-failure")
+ result.assert_outcomes(passed=0, failed=1)
+ # The lines that contain the failures are 4, 5, and 8. The first one
+ # is a stack trace and the other two are mismatches.
+ result.stdout.fnmatch_lines([
+ "*4: UnexpectedException*",
+ "*5: DocTestFailure*",
+ "*8: DocTestFailure*",
+ ])
+
class TestDoctestAutoUseFixtures(object):
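
By default a doctest stops at its first failing example; --doctest-continue-on-failure keeps evaluating so that every mismatch in the file is reported at once, which is what the line-number assertions above rely on. A smaller illustration (hypothetical test name):

def test_continue_on_failure_reports_all_mismatches(testdir):
    testdir.maketxtfile("""
        >>> 1 + 1
        3
        >>> 2 + 2
        5
    """)
    result = testdir.runpytest("--doctest-continue-on-failure")
    # one failing item, but both mismatching examples are listed in its report
    result.assert_outcomes(failed=1)
    result.stdout.fnmatch_lines(["*DocTestFailure*", "*DocTestFailure*"])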
diff --git a/testing/test_junitxml.py b/testing/test_junitxml.py
index 49318ef76..b8bbd888f 100644
--- a/testing/test_junitxml.py
+++ b/testing/test_junitxml.py
@@ -328,23 +328,28 @@ class TestPython(object):
fnode.assert_attr(message="internal error")
assert "Division" in fnode.toxml()
- def test_failure_function(self, testdir):
+ @pytest.mark.parametrize('junit_logging', ['no', 'system-out', 'system-err'])
+ def test_failure_function(self, testdir, junit_logging):
testdir.makepyfile("""
+ import logging
import sys
+
def test_fail():
print ("hello-stdout")
sys.stderr.write("hello-stderr\\n")
+ logging.info('info msg')
+ logging.warning('warning msg')
raise ValueError(42)
""")
- result, dom = runandparse(testdir)
+ result, dom = runandparse(testdir, '-o', 'junit_logging=%s' % junit_logging)
assert result.ret
node = dom.find_first_by_tag("testsuite")
node.assert_attr(failures=1, tests=1)
tnode = node.find_first_by_tag("testcase")
tnode.assert_attr(
file="test_failure_function.py",
- line="1",
+ line="3",
classname="test_failure_function",
name="test_fail")
fnode = tnode.find_first_by_tag("failure")
@@ -353,9 +358,21 @@ class TestPython(object):
systemout = fnode.next_siebling
assert systemout.tag == "system-out"
assert "hello-stdout" in systemout.toxml()
+ assert "info msg" not in systemout.toxml()
systemerr = systemout.next_siebling
assert systemerr.tag == "system-err"
assert "hello-stderr" in systemerr.toxml()
+ assert "info msg" not in systemerr.toxml()
+
+ if junit_logging == 'system-out':
+ assert "warning msg" in systemout.toxml()
+ assert "warning msg" not in systemerr.toxml()
+ elif junit_logging == 'system-err':
+ assert "warning msg" not in systemout.toxml()
+ assert "warning msg" in systemerr.toxml()
+ elif junit_logging == 'no':
+ assert "warning msg" not in systemout.toxml()
+ assert "warning msg" not in systemerr.toxml()
def test_failure_verbose_message(self, testdir):
testdir.makepyfile("""
@@ -846,10 +863,10 @@ def test_record_property(testdir):
import pytest
@pytest.fixture
- def other(record_xml_property):
- record_xml_property("bar", 1)
- def test_record(record_xml_property, other):
- record_xml_property("foo", "<1");
+ def other(record_property):
+ record_property("bar", 1)
+ def test_record(record_property, other):
+ record_property("foo", "<1");
""")
result, dom = runandparse(testdir, '-rw')
node = dom.find_first_by_tag("testsuite")
@@ -860,15 +877,15 @@ def test_record_property(testdir):
pnodes[1].assert_attr(name="foo", value="<1")
result.stdout.fnmatch_lines([
'test_record_property.py::test_record',
- '*record_xml_property*experimental*',
+ '*record_property*experimental*',
])
def test_record_property_same_name(testdir):
testdir.makepyfile("""
- def test_record_with_same_name(record_xml_property):
- record_xml_property("foo", "bar")
- record_xml_property("foo", "baz")
+ def test_record_with_same_name(record_property):
+ record_property("foo", "bar")
+ record_property("foo", "baz")
""")
result, dom = runandparse(testdir, '-rw')
node = dom.find_first_by_tag("testsuite")
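
These tests switch from the old record_xml_property fixture to its replacement, record_property. In a test it is requested like any other fixture, and each call adds a <property> element under the corresponding <testcase> in the JUnit XML report (names and values below are illustrative):

def test_record_build_metadata(record_property):
    # becomes <property name="example_id" value="4711"/> in the XML report
    record_property("example_id", 4711)
    record_property("reviewed", True)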
diff --git a/testing/test_mark.py b/testing/test_mark.py
index b4dd65634..9ec1ce75a 100644
--- a/testing/test_mark.py
+++ b/testing/test_mark.py
@@ -8,11 +8,13 @@ from _pytest.mark import (
EMPTY_PARAMETERSET_OPTION,
)
+ignore_markinfo = pytest.mark.filterwarnings('ignore:MarkInfo objects:_pytest.deprecated.RemovedInPytest4Warning')
+
class TestMark(object):
def test_markinfo_repr(self):
from _pytest.mark import MarkInfo, Mark
- m = MarkInfo(Mark("hello", (1, 2), {}))
+ m = MarkInfo.for_mark(Mark("hello", (1, 2), {}))
repr(m)
@pytest.mark.parametrize('attr', ['mark', 'param'])
@@ -51,6 +53,7 @@ class TestMark(object):
mark.hello(f)
assert f.hello
+ @ignore_markinfo
def test_pytest_mark_keywords(self):
mark = Mark()
@@ -62,6 +65,7 @@ class TestMark(object):
assert f.world.kwargs['x'] == 3
assert f.world.kwargs['y'] == 4
+ @ignore_markinfo
def test_apply_multiple_and_merge(self):
mark = Mark()
@@ -78,6 +82,7 @@ class TestMark(object):
assert f.world.kwargs['y'] == 1
assert len(f.world.args) == 0
+ @ignore_markinfo
def test_pytest_mark_positional(self):
mark = Mark()
@@ -88,6 +93,7 @@ class TestMark(object):
assert f.world.args[0] == "hello"
mark.world("world")(f)
+ @ignore_markinfo
def test_pytest_mark_positional_func_and_keyword(self):
mark = Mark()
@@ -103,6 +109,7 @@ class TestMark(object):
assert g.world.args[0] is f
assert g.world.kwargs["omega"] == "hello"
+ @ignore_markinfo
def test_pytest_mark_reuse(self):
mark = Mark()
@@ -484,6 +491,7 @@ class TestFunctional(object):
assert 'hello' in keywords
assert 'world' in keywords
+ @ignore_markinfo
def test_merging_markers(self, testdir):
p = testdir.makepyfile("""
import pytest
@@ -509,7 +517,6 @@ class TestFunctional(object):
assert values[1].args == ()
assert values[2].args == ("pos1", )
- @pytest.mark.xfail(reason='unfixed')
def test_merging_markers_deep(self, testdir):
# issue 199 - propagate markers into nested classes
p = testdir.makepyfile("""
@@ -526,7 +533,7 @@ class TestFunctional(object):
items, rec = testdir.inline_genitems(p)
for item in items:
print(item, item.keywords)
- assert 'a' in item.keywords
+ assert [x for x in item.iter_markers() if x.name == 'a']
def test_mark_decorator_subclass_does_not_propagate_to_base(self, testdir):
p = testdir.makepyfile("""
@@ -622,6 +629,7 @@ class TestFunctional(object):
"keyword: *hello*"
])
+ @ignore_markinfo
def test_merging_markers_two_functions(self, testdir):
p = testdir.makepyfile("""
import pytest
@@ -676,6 +684,7 @@ class TestFunctional(object):
reprec = testdir.inline_run()
reprec.assertoutcome(passed=1)
+ @ignore_markinfo
def test_keyword_added_for_session(self, testdir):
testdir.makeconftest("""
import pytest
@@ -715,8 +724,8 @@ class TestFunctional(object):
if isinstance(v, MarkInfo)])
assert marker_names == set(expected_markers)
- @pytest.mark.xfail(reason='callspec2.setmulti misuses keywords')
@pytest.mark.issue1540
+ @pytest.mark.filterwarnings("ignore")
def test_mark_from_parameters(self, testdir):
testdir.makepyfile("""
import pytest
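
The keyword-based assertion is replaced with Node.iter_markers(), which yields the marks applied to the item itself plus those inherited from its class and module, so nested classes are covered (issue 199). A typical filtering pattern built on it (illustrative hook):

def pytest_collection_modifyitems(items):
    for item in items:
        # True if the 'a' mark appears anywhere in the item's chain
        if any(mark.name == "a" for mark in item.iter_markers()):
            print("%s carries the 'a' mark" % item.nodeid)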
diff --git a/testing/test_pdb.py b/testing/test_pdb.py
index 0f5196751..85817f79b 100644
--- a/testing/test_pdb.py
+++ b/testing/test_pdb.py
@@ -216,6 +216,42 @@ class TestPDB(object):
assert "captured stderr" not in output
self.flush(child)
+ @pytest.mark.parametrize('showcapture', ['all', 'no', 'log'])
+ def test_pdb_print_captured_logs(self, testdir, showcapture):
+ p1 = testdir.makepyfile("""
+ def test_1():
+ import logging
+ logging.warn("get " + "rekt")
+ assert False
+ """)
+ child = testdir.spawn_pytest("--show-capture=%s --pdb %s" % (showcapture, p1))
+ if showcapture in ('all', 'log'):
+ child.expect("captured log")
+ child.expect("get rekt")
+ child.expect("(Pdb)")
+ child.sendeof()
+ rest = child.read().decode("utf8")
+ assert "1 failed" in rest
+ self.flush(child)
+
+ def test_pdb_print_captured_logs_nologging(self, testdir):
+ p1 = testdir.makepyfile("""
+ def test_1():
+ import logging
+ logging.warn("get " + "rekt")
+ assert False
+ """)
+ child = testdir.spawn_pytest("--show-capture=all --pdb "
+ "-p no:logging %s" % p1)
+ child.expect("get rekt")
+ output = child.before.decode("utf8")
+ assert "captured log" not in output
+ child.expect("(Pdb)")
+ child.sendeof()
+ rest = child.read().decode("utf8")
+ assert "1 failed" in rest
+ self.flush(child)
+
def test_pdb_interaction_exception(self, testdir):
p1 = testdir.makepyfile("""
import pytest
diff --git a/testing/test_resultlog.py b/testing/test_resultlog.py
index 45fed7078..b1760721c 100644
--- a/testing/test_resultlog.py
+++ b/testing/test_resultlog.py
@@ -13,7 +13,7 @@ def test_generic_path(testdir):
from _pytest.main import Session
config = testdir.parseconfig()
session = Session(config)
- p1 = Node('a', config=config, session=session)
+ p1 = Node('a', config=config, session=session, nodeid='a')
# assert p1.fspath is None
p2 = Node('B', parent=p1)
p3 = Node('()', parent=p2)
diff --git a/testing/test_session.py b/testing/test_session.py
index 9ec13f523..32d8ce689 100644
--- a/testing/test_session.py
+++ b/testing/test_session.py
@@ -1,4 +1,5 @@
from __future__ import absolute_import, division, print_function
+
import pytest
from _pytest.main import EXIT_NOTESTSCOLLECTED
@@ -239,6 +240,20 @@ def test_exclude(testdir):
result.stdout.fnmatch_lines(["*1 passed*"])
+def test_deselect(testdir):
+ testdir.makepyfile(test_a="""
+ import pytest
+ def test_a1(): pass
+ @pytest.mark.parametrize('b', range(3))
+ def test_a2(b): pass
+ """)
+ result = testdir.runpytest("-v", "--deselect=test_a.py::test_a2[1]", "--deselect=test_a.py::test_a2[2]")
+ assert result.ret == 0
+ result.stdout.fnmatch_lines(["*2 passed, 2 deselected*"])
+ for line in result.stdout.lines:
+ assert not line.startswith(('test_a.py::test_a2[1]', 'test_a.py::test_a2[2]'))
+
+
def test_sessionfinish_with_start(testdir):
testdir.makeconftest("""
import os
@@ -253,3 +268,32 @@ def test_sessionfinish_with_start(testdir):
""")
res = testdir.runpytest("--collect-only")
assert res.ret == EXIT_NOTESTSCOLLECTED
+
+
+@pytest.mark.parametrize("path", ["root", "{relative}/root", "{environment}/root"])
+def test_rootdir_option_arg(testdir, monkeypatch, path):
+ monkeypatch.setenv('PY_ROOTDIR_PATH', str(testdir.tmpdir))
+ path = path.format(relative=str(testdir.tmpdir),
+ environment='$PY_ROOTDIR_PATH')
+
+ rootdir = testdir.mkdir("root")
+ rootdir.mkdir("tests")
+ testdir.makepyfile("""
+ import os
+ def test_one():
+ assert 1
+ """)
+
+ result = testdir.runpytest("--rootdir={}".format(path))
+ result.stdout.fnmatch_lines(['*rootdir: {}/root, inifile:*'.format(testdir.tmpdir), "*1 passed*"])
+
+
+def test_rootdir_wrong_option_arg(testdir):
+ testdir.makepyfile("""
+ import os
+ def test_one():
+ assert 1
+ """)
+
+ result = testdir.runpytest("--rootdir=wrong_dir")
+ result.stderr.fnmatch_lines(["*Directory *wrong_dir* not found. Check your '--rootdir' option.*"])
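
test_deselect exercises the new --deselect option, which removes individual node ids from the collected set before the run starts (they show up in the deselected counter instead of being skipped), while test_rootdir_option_arg covers the new --rootdir option, including environment-variable expansion. A short --deselect illustration (hypothetical module name):

def test_deselect_one_parametrized_case(testdir):
    testdir.makepyfile(test_m="""
        import pytest
        @pytest.mark.parametrize('n', range(2))
        def test_n(n): pass
    """)
    result = testdir.runpytest("--deselect=test_m.py::test_n[0]")
    result.stdout.fnmatch_lines(["*1 passed, 1 deselected*"])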
diff --git a/testing/test_skipping.py b/testing/test_skipping.py
index db4e6d3f7..90562c939 100644
--- a/testing/test_skipping.py
+++ b/testing/test_skipping.py
@@ -156,6 +156,21 @@ class TestXFail(object):
assert callreport.passed
assert callreport.wasxfail == "this is an xfail"
+ def test_xfail_using_platform(self, testdir):
+ """
+ Verify that platform can be used with xfail statements.
+ """
+ item = testdir.getitem("""
+ import pytest
+ @pytest.mark.xfail("platform.platform() == platform.platform()")
+ def test_func():
+ assert 0
+ """)
+ reports = runtestprotocol(item, log=False)
+ assert len(reports) == 3
+ callreport = reports[1]
+ assert callreport.wasxfail
+
def test_xfail_xpassed_strict(self, testdir):
item = testdir.getitem("""
import pytest
@@ -612,6 +627,16 @@ class TestSkipif(object):
])
assert result.ret == 0
+ def test_skipif_using_platform(self, testdir):
+ item = testdir.getitem("""
+ import pytest
+ @pytest.mark.skipif("platform.platform() == platform.platform()")
+ def test_func():
+ pass
+ """)
+ pytest.raises(pytest.skip.Exception, lambda:
+ pytest_runtest_setup(item))
+
@pytest.mark.parametrize('marker, msg1, msg2', [
('skipif', 'SKIP', 'skipped'),
('xfail', 'XPASS', 'xpassed'),
@@ -1065,3 +1090,18 @@ def test_mark_xfail_item(testdir):
assert not failed
xfailed = [r for r in skipped if hasattr(r, 'wasxfail')]
assert xfailed
+
+
+def test_summary_list_after_errors(testdir):
+ """Ensure the list of errors/fails/xfails/skips appears after tracebacks in terminal reporting."""
+ testdir.makepyfile("""
+ import pytest
+ def test_fail():
+ assert 0
+ """)
+ result = testdir.runpytest('-ra')
+ result.stdout.fnmatch_lines([
+ '=* FAILURES *=',
+ '*= short test summary info =*',
+ 'FAIL test_summary_list_after_errors.py::test_fail',
+ ])
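
The two new platform tests check that the platform module is available in the namespace used to evaluate string conditions for skipif and xfail (alongside os, sys and config). In user code that means conditions like these work directly (illustrative):

import pytest

@pytest.mark.skipif("platform.system() == 'Windows'")
def test_not_on_windows():
    pass

@pytest.mark.xfail("platform.python_implementation() == 'PyPy'",
                   reason="illustrative: expected to fail on PyPy only")
def test_cpython_specific_behaviour():
    pass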
diff --git a/testing/test_terminal.py b/testing/test_terminal.py
index ff4296925..8ef25062e 100644
--- a/testing/test_terminal.py
+++ b/testing/test_terminal.py
@@ -32,16 +32,19 @@ class Option(object):
return values
-def pytest_generate_tests(metafunc):
- if "option" in metafunc.fixturenames:
- metafunc.addcall(id="default",
- funcargs={'option': Option(verbose=False)})
- metafunc.addcall(id="verbose",
- funcargs={'option': Option(verbose=True)})
- metafunc.addcall(id="quiet",
- funcargs={'option': Option(verbose=-1)})
- metafunc.addcall(id="fulltrace",
- funcargs={'option': Option(fulltrace=True)})
+@pytest.fixture(params=[
+ Option(verbose=False),
+ Option(verbose=True),
+ Option(verbose=-1),
+ Option(fulltrace=True),
+], ids=[
+ "default",
+ "verbose",
+ "quiet",
+ "fulltrace",
+])
+def option(request):
+ return request.param
@pytest.mark.parametrize('input,expected', [
@@ -431,11 +434,36 @@ class TestTerminalFunctional(object):
)
result = testdir.runpytest("-k", "test_two:", testpath)
result.stdout.fnmatch_lines([
+ "collected 3 items / 1 deselected",
"*test_deselected.py ..*",
- "=* 1 test*deselected *=",
])
assert result.ret == 0
+ def test_show_deselected_items_using_markexpr_before_test_execution(
+ self, testdir):
+ testdir.makepyfile("""
+ import pytest
+
+ @pytest.mark.foo
+ def test_foobar():
+ pass
+
+ @pytest.mark.bar
+ def test_bar():
+ pass
+
+ def test_pass():
+ pass
+ """)
+ result = testdir.runpytest('-m', 'not foo')
+ result.stdout.fnmatch_lines([
+ "collected 3 items / 1 deselected",
+ "*test_show_des*.py ..*",
+ "*= 2 passed, 1 deselected in * =*",
+ ])
+ assert "= 1 deselected =" not in result.stdout.str()
+ assert result.ret == 0
+
def test_no_skip_summary_if_failure(self, testdir):
testdir.makepyfile("""
import pytest
@@ -657,10 +685,12 @@ def test_color_yes_collection_on_non_atty(testdir, verbose):
def test_getreportopt():
- class config(object):
- class option(object):
+ class Config(object):
+ class Option(object):
reportchars = ""
disable_warnings = True
+ option = Option()
+ config = Config()
config.option.reportchars = "sf"
assert getreportopt(config) == "sf"
@@ -823,6 +853,51 @@ def pytest_report_header(config, startdir):
str(testdir.tmpdir),
])
+ def test_show_capture(self, testdir):
+ testdir.makepyfile("""
+ import sys
+ import logging
+ def test_one():
+ sys.stdout.write('!This is stdout!')
+ sys.stderr.write('!This is stderr!')
+ logging.warning('!This is a warning log msg!')
+ assert False, 'Something failed'
+ """)
+
+ result = testdir.runpytest("--tb=short")
+ result.stdout.fnmatch_lines(["!This is stdout!",
+ "!This is stderr!",
+ "*WARNING*!This is a warning log msg!"])
+
+ result = testdir.runpytest("--show-capture=all", "--tb=short")
+ result.stdout.fnmatch_lines(["!This is stdout!",
+ "!This is stderr!",
+ "*WARNING*!This is a warning log msg!"])
+
+ stdout = testdir.runpytest(
+ "--show-capture=stdout", "--tb=short").stdout.str()
+ assert "!This is stderr!" not in stdout
+ assert "!This is stdout!" in stdout
+ assert "!This is a warning log msg!" not in stdout
+
+ stdout = testdir.runpytest(
+ "--show-capture=stderr", "--tb=short").stdout.str()
+ assert "!This is stdout!" not in stdout
+ assert "!This is stderr!" in stdout
+ assert "!This is a warning log msg!" not in stdout
+
+ stdout = testdir.runpytest(
+ "--show-capture=log", "--tb=short").stdout.str()
+ assert "!This is stdout!" not in stdout
+ assert "!This is stderr!" not in stdout
+ assert "!This is a warning log msg!" in stdout
+
+ stdout = testdir.runpytest(
+ "--show-capture=no", "--tb=short").stdout.str()
+ assert "!This is stdout!" not in stdout
+ assert "!This is stderr!" not in stdout
+ assert "!This is a warning log msg!" not in stdout
+
@pytest.mark.xfail("not hasattr(os, 'dup')")
def test_fdopen_kept_alive_issue124(testdir):
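
test_show_capture walks through every value of the new --show-capture option, which selects the captured sections replayed under a failure: no, stdout, stderr, log, or all (the default). A compact way to see the effect (hypothetical file name):

# test_capture_demo.py
import logging
import sys

def test_fails_with_output():
    sys.stdout.write("!stdout!")
    sys.stderr.write("!stderr!")
    logging.warning("!log!")
    assert False

# pytest --show-capture=log test_capture_demo.py   -> only the captured-log section is shown
# pytest --show-capture=no test_capture_demo.py    -> no captured output sections at all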