-rw-r--r--   AUTHORS                            2
-rw-r--r--   changelog/4147.feature.rst         1
-rw-r--r--   doc/en/cache.rst                   6
-rw-r--r--   src/_pytest/config/__init__.py     1
-rw-r--r--   src/_pytest/stepwise.py          102
-rw-r--r--   testing/test_cacheprovider.py      3
-rw-r--r--   testing/test_stepwise.py         148
7 files changed, 262 insertions, 1 deletion
diff --git a/AUTHORS b/AUTHORS
index e5bf56a65..dabeb1c06 100644
--- a/AUTHORS
+++ b/AUTHORS
@@ -59,6 +59,7 @@ Danielle Jenkins
Dave Hunt
David Díaz-Barquero
David Mohr
+David Szotten
David Vierra
Daw-Ran Liou
Denis Kirisov
@@ -161,6 +162,7 @@ Miro Hrončok
Nathaniel Waisbrot
Ned Batchelder
Neven Mundar
+Niclas Olofsson
Nicolas Delaby
Oleg Pidsadnyi
Oleg Sushchenko
diff --git a/changelog/4147.feature.rst b/changelog/4147.feature.rst
new file mode 100644
index 000000000..812898f90
--- /dev/null
+++ b/changelog/4147.feature.rst
@@ -0,0 +1 @@
+Add ``-sw``, ``--stepwise`` as an alternative to ``--lf -x`` for stopping at the first failure, but starting the next test invocation from that test. See `the documentation <https://docs.pytest.org/en/latest/cache.html#stepwise>`_ for more info.
diff --git a/doc/en/cache.rst b/doc/en/cache.rst
index 08f204655..245edfc1b 100644
--- a/doc/en/cache.rst
+++ b/doc/en/cache.rst
@@ -260,3 +260,9 @@ by adding the ``--cache-clear`` option like this::
This is recommended for invocations from Continuous Integration
servers where isolation and correctness is more important
than speed.
+
+
+Stepwise
+--------
+
+As an alternative to ``--lf -x``, especially for cases where you expect a large part of the test suite to fail, ``--sw``, ``--stepwise`` allows you to fix the failing tests one at a time. The test suite will run until the first failure and then stop. At the next invocation, tests will continue from the last failing test and then run until the next failing test. You may use the ``--stepwise-skip`` option to ignore one failing test and stop the test execution on the second failing test instead. This is useful if you get stuck on a failing test and just want to ignore it until later.
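
As a quick illustration of the workflow described in the documentation above (a usage sketch only, in the console-example style ``cache.rst`` already uses; the command lines are the only thing shown, no specific test ids are assumed):

    # first run: stops at the first failing test and records its node id
    pytest --stepwise

    # after editing the offending test, rerun; execution resumes from that test
    pytest --stepwise

    # ignore one stubborn failure for now and stop at the next failing test instead
    pytest --stepwise --stepwise-skip

The short form ``pytest -sw`` is equivalent to ``pytest --stepwise``, as defined in the option registration below.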
diff --git a/src/_pytest/config/__init__.py b/src/_pytest/config/__init__.py
index c7bde9979..f6a67d560 100644
--- a/src/_pytest/config/__init__.py
+++ b/src/_pytest/config/__init__.py
@@ -134,6 +134,7 @@ default_plugins = (
"freeze_support",
"setuponly",
"setupplan",
+ "stepwise",
"warnings",
"logging",
)
diff --git a/src/_pytest/stepwise.py b/src/_pytest/stepwise.py
new file mode 100644
index 000000000..1efa2e7ca
--- /dev/null
+++ b/src/_pytest/stepwise.py
@@ -0,0 +1,102 @@
+import pytest
+
+
+def pytest_addoption(parser):
+    group = parser.getgroup("general")
+    group.addoption(
+        "--sw",
+        "--stepwise",
+        action="store_true",
+        dest="stepwise",
+        help="exit on test fail and continue from last failing test next time",
+    )
+    group.addoption(
+        "--stepwise-skip",
+        action="store_true",
+        dest="stepwise_skip",
+        help="ignore the first failing test but stop on the next failing test",
+    )
+
+
+@pytest.hookimpl
+def pytest_configure(config):
+    config.pluginmanager.register(StepwisePlugin(config), "stepwiseplugin")
+
+
+class StepwisePlugin:
+    def __init__(self, config):
+        self.config = config
+        self.active = config.getvalue("stepwise")
+        self.session = None
+
+        if self.active:
+            self.lastfailed = config.cache.get("cache/stepwise", None)
+            self.skip = config.getvalue("stepwise_skip")
+
+    def pytest_sessionstart(self, session):
+        self.session = session
+
+    def pytest_collection_modifyitems(self, session, config, items):
+        if not self.active or not self.lastfailed:
+            return
+
+        already_passed = []
+        found = False
+
+        # Make a list of all tests that have been run before the last failing one.
+        for item in items:
+            if item.nodeid == self.lastfailed:
+                found = True
+                break
+            else:
+                already_passed.append(item)
+
+        # If the previously failed test was not found among the test items,
+        # do not skip any tests.
+        if not found:
+            already_passed = []
+
+        for item in already_passed:
+            items.remove(item)
+
+        config.hook.pytest_deselected(items=already_passed)
+
+    def pytest_collectreport(self, report):
+        if self.active and report.failed:
+            self.session.shouldstop = (
+                "Error when collecting test, stopping test execution."
+            )
+
+    def pytest_runtest_logreport(self, report):
+        # Skip this hook if the plugin is not active or the test is xfailed.
+        if not self.active or "xfail" in report.keywords:
+            return
+
+        if report.failed:
+            if self.skip:
+                # Remove test from the failed ones (if it exists) and unset the skip option
+                # to make sure the following tests will not be skipped.
+                if report.nodeid == self.lastfailed:
+                    self.lastfailed = None
+
+                self.skip = False
+            else:
+                # Mark test as the last failing and interrupt the test session.
+                self.lastfailed = report.nodeid
+                self.session.shouldstop = (
+                    "Test failed, continuing from this test next run."
+                )
+
+        else:
+            # If the test was actually run and did pass.
+            if report.when == "call":
+                # Remove test from the failed ones, if it exists.
+                if report.nodeid == self.lastfailed:
+                    self.lastfailed = None
+
+    def pytest_sessionfinish(self, session):
+        if self.active:
+            self.config.cache.set("cache/stepwise", self.lastfailed)
+        else:
+            # Clear the list of failing tests if the plugin is not active.
+            self.config.cache.set("cache/stepwise", [])
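
A note on persistence: as ``pytest_sessionfinish`` above shows, the plugin stores the last failing test's node id through pytest's cache API under the ``cache/stepwise`` key, so it survives between runs (it can also be listed with ``pytest --cache-show``). The following is only an illustrative sketch, not part of this change, of how a local ``conftest.py`` could peek at that value using the same ``config.cache`` API:

def pytest_sessionstart(session):
    # "cache/stepwise" is the key written by StepwisePlugin above; the stored
    # value is a single node id string, or None/[] when nothing is recorded.
    last_failed = session.config.cache.get("cache/stepwise", None)
    if last_failed:
        print("stepwise will resume from: {}".format(last_failed))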
diff --git a/testing/test_cacheprovider.py b/testing/test_cacheprovider.py
index 43b8392e5..cd888dce1 100644
--- a/testing/test_cacheprovider.py
+++ b/testing/test_cacheprovider.py
@@ -66,7 +66,8 @@ class TestNewAPI(object):
        )
        result = testdir.runpytest("-rw")
        assert result.ret == 1
-        result.stdout.fnmatch_lines(["*could not create cache path*", "*2 warnings*"])
+        # warnings from nodeids, lastfailed, and stepwise
+        result.stdout.fnmatch_lines(["*could not create cache path*", "*3 warnings*"])

    def test_config_cache(self, testdir):
        testdir.makeconftest(
diff --git a/testing/test_stepwise.py b/testing/test_stepwise.py
new file mode 100644
index 000000000..ad9b77296
--- /dev/null
+++ b/testing/test_stepwise.py
@@ -0,0 +1,148 @@
+import pytest
+
+
+@pytest.fixture
+def stepwise_testdir(testdir):
+    # Rather than having to modify our testfile between tests, we introduce
+    # a flag for whether or not the second test should fail.
+    testdir.makeconftest(
+        """
+def pytest_addoption(parser):
+    group = parser.getgroup('general')
+    group.addoption('--fail', action='store_true', dest='fail')
+    group.addoption('--fail-last', action='store_true', dest='fail_last')
+"""
+    )
+
+    # Create a simple test suite.
+    testdir.makepyfile(
+        test_a="""
+def test_success_before_fail():
+    assert 1
+
+def test_fail_on_flag(request):
+    assert not request.config.getvalue('fail')
+
+def test_success_after_fail():
+    assert 1
+
+def test_fail_last_on_flag(request):
+    assert not request.config.getvalue('fail_last')
+
+def test_success_after_last_fail():
+    assert 1
+"""
+    )
+
+    testdir.makepyfile(
+        test_b="""
+def test_success():
+    assert 1
+"""
+    )
+
+    return testdir
+
+
+@pytest.fixture
+def error_testdir(testdir):
+    testdir.makepyfile(
+        test_a="""
+def test_error(nonexisting_fixture):
+    assert 1
+
+def test_success_after_fail():
+    assert 1
+"""
+    )
+
+    return testdir
+
+
+@pytest.fixture
+def broken_testdir(testdir):
+    testdir.makepyfile(
+        working_testfile="def test_proper(): assert 1", broken_testfile="foobar"
+    )
+    return testdir
+
+
+def test_run_without_stepwise(stepwise_testdir):
+    result = stepwise_testdir.runpytest("-v", "--strict", "--fail")
+
+    result.stdout.fnmatch_lines(["*test_success_before_fail PASSED*"])
+    result.stdout.fnmatch_lines(["*test_fail_on_flag FAILED*"])
+    result.stdout.fnmatch_lines(["*test_success_after_fail PASSED*"])
+
+
+def test_fail_and_continue_with_stepwise(stepwise_testdir):
+    # Run the tests with a failing second test.
+    result = stepwise_testdir.runpytest("-v", "--strict", "--stepwise", "--fail")
+    assert not result.stderr.str()
+
+    stdout = result.stdout.str()
+    # Make sure we stop after the first failing test.
+    assert "test_success_before_fail PASSED" in stdout
+    assert "test_fail_on_flag FAILED" in stdout
+    assert "test_success_after_fail" not in stdout
+
+    # "Fix" the test that failed in the last run and run it again.
+    result = stepwise_testdir.runpytest("-v", "--strict", "--stepwise")
+    assert not result.stderr.str()
+
+    stdout = result.stdout.str()
+    # Make sure the latest failing test runs and then continues.
+    assert "test_success_before_fail" not in stdout
+    assert "test_fail_on_flag PASSED" in stdout
+    assert "test_success_after_fail PASSED" in stdout
+
+
+def test_run_with_skip_option(stepwise_testdir):
+    result = stepwise_testdir.runpytest(
+        "-v", "--strict", "--stepwise", "--stepwise-skip", "--fail", "--fail-last"
+    )
+    assert not result.stderr.str()
+
+    stdout = result.stdout.str()
+    # Make sure the first fail is ignored and the second fail stops the test run.
+    assert "test_fail_on_flag FAILED" in stdout
+    assert "test_success_after_fail PASSED" in stdout
+    assert "test_fail_last_on_flag FAILED" in stdout
+    assert "test_success_after_last_fail" not in stdout
+
+
+def test_fail_on_errors(error_testdir):
+    result = error_testdir.runpytest("-v", "--strict", "--stepwise")
+
+    assert not result.stderr.str()
+    stdout = result.stdout.str()
+
+    assert "test_error ERROR" in stdout
+    assert "test_success_after_fail" not in stdout
+
+
+def test_change_testfile(stepwise_testdir):
+    result = stepwise_testdir.runpytest(
+        "-v", "--strict", "--stepwise", "--fail", "test_a.py"
+    )
+    assert not result.stderr.str()
+
+    stdout = result.stdout.str()
+    assert "test_fail_on_flag FAILED" in stdout
+
+    # Make sure the second test run starts from the beginning, since the
+    # test to continue from does not exist in test_b.py.
+    result = stepwise_testdir.runpytest("-v", "--strict", "--stepwise", "test_b.py")
+    assert not result.stderr.str()
+
+    stdout = result.stdout.str()
+    assert "test_success PASSED" in stdout
+
+
+def test_stop_on_collection_errors(broken_testdir):
+    result = broken_testdir.runpytest(
+        "-v", "--strict", "--stepwise", "working_testfile.py", "broken_testfile.py"
+    )
+
+    stdout = result.stdout.str()
+    assert "errors during collection" in stdout