author    Ran Benita <ran@unusedvar.com>  2020-02-21 17:03:46 +0200
committer Ran Benita <ran@unusedvar.com>  2020-02-28 14:34:44 +0200
commit    d636fcd557c543f31859eb8ead4bc4740ac5d5cd (patch)
tree      1fff9a122ff171ce6945ea393a3c79245aafc723 /src/_pytest/skipping.py
parent    f77d606d4eeba38352bd3ea960f44b171fe06c1f (diff)
download  pytest-d636fcd557c543f31859eb8ead4bc4740ac5d5cd.tar.gz
Add a typing-compatible mechanism for ad-hoc attributes on various objects
pytest has several instances where plugins set their own attributes on objects they receive in hooks, such as nodes and config. Since plugins are detached from these objects' definitions by design, this causes a problem for type checking: the attributes are not defined on the class, so mypy complains. Fix this by giving these objects a "store" which plugins can use in a type-safe manner. Currently this mechanism is private; we can consider exposing it at a later point.
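For context, the mechanism works roughly as follows (a condensed sketch of the idea, not necessarily the exact contents of the new _pytest/store.py):

from typing import Any, Dict, Generic, TypeVar, Union, cast

T = TypeVar("T")
D = TypeVar("D")


class StoreKey(Generic[T]):
    """A key into a Store; the type parameter records the value's type."""


class Store:
    """A type-safe heterogeneous mapping from StoreKey[T] to T."""

    def __init__(self) -> None:
        self._storage: Dict[StoreKey[Any], object] = {}

    def __setitem__(self, key: StoreKey[T], value: T) -> None:
        # mypy checks that `value` matches the key's type parameter.
        self._storage[key] = value

    def __getitem__(self, key: StoreKey[T]) -> T:
        # The cast is sound because __setitem__ enforced the type on entry.
        return cast(T, self._storage[key])

    def get(self, key: StoreKey[T], default: D) -> Union[T, D]:
        try:
            return self[key]
        except KeyError:
            return default

    def __delitem__(self, key: StoreKey[T]) -> None:
        del self._storage[key]

    def __contains__(self, key: StoreKey[T]) -> bool:
        return key in self._storage

Because keys are plain objects compared by identity, two plugins cannot collide on a name the way they can with ad-hoc attributes, and each key pins down the type of the value stored under it.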
Diffstat (limited to 'src/_pytest/skipping.py')
-rw-r--r--  src/_pytest/skipping.py  34
1 file changed, 20 insertions(+), 14 deletions(-)
diff --git a/src/_pytest/skipping.py b/src/_pytest/skipping.py
index f70ef7f59..fe8742c66 100644
--- a/src/_pytest/skipping.py
+++ b/src/_pytest/skipping.py
@@ -4,6 +4,12 @@ from _pytest.mark.evaluate import MarkEvaluator
 from _pytest.outcomes import fail
 from _pytest.outcomes import skip
 from _pytest.outcomes import xfail
+from _pytest.store import StoreKey
+
+
+skipped_by_mark_key = StoreKey[bool]()
+evalxfail_key = StoreKey[MarkEvaluator]()
+unexpectedsuccess_key = StoreKey[str]()
 
 
 def pytest_addoption(parser):
@@ -68,14 +74,14 @@ def pytest_configure(config):
 @hookimpl(tryfirst=True)
 def pytest_runtest_setup(item):
     # Check if skip or skipif are specified as pytest marks
-    item._skipped_by_mark = False
+    item._store[skipped_by_mark_key] = False
     eval_skipif = MarkEvaluator(item, "skipif")
     if eval_skipif.istrue():
-        item._skipped_by_mark = True
+        item._store[skipped_by_mark_key] = True
         skip(eval_skipif.getexplanation())
 
     for skip_info in item.iter_markers(name="skip"):
-        item._skipped_by_mark = True
+        item._store[skipped_by_mark_key] = True
         if "reason" in skip_info.kwargs:
             skip(skip_info.kwargs["reason"])
         elif skip_info.args:
@@ -83,7 +89,7 @@ def pytest_runtest_setup(item):
         else:
             skip("unconditional skip")
 
-    item._evalxfail = MarkEvaluator(item, "xfail")
+    item._store[evalxfail_key] = MarkEvaluator(item, "xfail")
     check_xfail_no_run(item)
 
 
@@ -99,7 +105,7 @@ def pytest_pyfunc_call(pyfuncitem):
 def check_xfail_no_run(item):
     """check xfail(run=False)"""
     if not item.config.option.runxfail:
-        evalxfail = item._evalxfail
+        evalxfail = item._store[evalxfail_key]
         if evalxfail.istrue():
             if not evalxfail.get("run", True):
                 xfail("[NOTRUN] " + evalxfail.getexplanation())
@@ -107,12 +113,12 @@ def check_xfail_no_run(item):
 
 def check_strict_xfail(pyfuncitem):
     """check xfail(strict=True) for the given PASSING test"""
-    evalxfail = pyfuncitem._evalxfail
+    evalxfail = pyfuncitem._store[evalxfail_key]
     if evalxfail.istrue():
         strict_default = pyfuncitem.config.getini("xfail_strict")
         is_strict_xfail = evalxfail.get("strict", strict_default)
         if is_strict_xfail:
-            del pyfuncitem._evalxfail
+            del pyfuncitem._store[evalxfail_key]
             explanation = evalxfail.getexplanation()
             fail("[XPASS(strict)] " + explanation, pytrace=False)
 
@@ -121,12 +127,12 @@ def check_strict_xfail(pyfuncitem):
 def pytest_runtest_makereport(item, call):
     outcome = yield
     rep = outcome.get_result()
-    evalxfail = getattr(item, "_evalxfail", None)
-    # unittest special case, see setting of _unexpectedsuccess
-    if hasattr(item, "_unexpectedsuccess") and rep.when == "call":
-
-        if item._unexpectedsuccess:
-            rep.longrepr = "Unexpected success: {}".format(item._unexpectedsuccess)
+    evalxfail = item._store.get(evalxfail_key, None)
+    # unittest special case, see setting of unexpectedsuccess_key
+    if unexpectedsuccess_key in item._store and rep.when == "call":
+        reason = item._store[unexpectedsuccess_key]
+        if reason:
+            rep.longrepr = "Unexpected success: {}".format(reason)
         else:
             rep.longrepr = "Unexpected success"
         rep.outcome = "failed"
@@ -154,7 +160,7 @@ def pytest_runtest_makereport(item, call):
rep.outcome = "passed"
rep.wasxfail = explanation
elif (
- getattr(item, "_skipped_by_mark", False)
+ item._store.get(skipped_by_mark_key, True)
and rep.skipped
and type(rep.longrepr) is tuple
):
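The payoff shows up when type-checking plugin code against these keys. A hypothetical example (the key and values below are made up for illustration and are not part of this commit):

failure_count_key = StoreKey[int]()

def pytest_runtest_setup(item):
    item._store[failure_count_key] = 0             # ok: int matches StoreKey[int]
    item._store[failure_count_key] = "three"       # rejected by mypy: str is not int
    count = item._store.get(failure_count_key, 0)  # mypy infers int for count

Previously, the equivalent "item.failure_count = 0" would have needed a "# type: ignore", since no such attribute exists on the item's class definition.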