author    c-parsons <cparsons@google.com>    2019-02-14 16:54:42 -0500
committer GitHub <noreply@github.com>        2019-02-14 16:54:42 -0500
commit    6bb8994a03e9e47b9394888d82444427c5f6149e (patch)
tree      ecef7b1801130a63be91b01bef0fccf9602bdc99 /lib
parent    aa3147f0de51c645aefcf82e523a4ccd0f85cb3c (diff)
Add analysis-test test framework (#110)
This framework allows for easy creation of unittest-like tests that make assertions on the providers returned by real targets.
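For orientation, a typical test written against this framework looks like the sketch below, assembled from the docstrings added in this change; `_your_test` and the load label are illustrative, not prescribed by the commit:

```
# In a .bzl file. The load label assumes the bazel-skylib repository name.
load("@bazel_skylib//lib:unittest.bzl", "analysistest", "asserts")

def _your_test(ctx):
    env = analysistest.begin(ctx)
    # Assertions on the providers of the target under test go here.
    return analysistest.end(env)

your_test = analysistest.make(_your_test)
```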
Diffstat (limited to 'lib')
-rw-r--r--  lib/unittest.bzl  136
1 file changed, 124 insertions, 12 deletions
diff --git a/lib/unittest.bzl b/lib/unittest.bzl
index 0d5307b..793955f 100644
--- a/lib/unittest.bzl
+++ b/lib/unittest.bzl
@@ -59,6 +59,20 @@ unittest_toolchain = rule(
     },
 )
+def _impl_function_name(impl):
+    """Derives the name of the given rule implementation function.
+
+    This can be used for better test feedback.
+    """
+
+    # Starlark currently stringifies a function as "<function NAME>", so we use
+    # that knowledge to parse the "NAME" portion out. If this behavior ever
+    # changes, we'll need to update this.
+    # TODO(bazel-team): Expose a ._name field on functions to avoid this.
+    impl_name = str(impl)
+    impl_name = impl_name.partition("<function ")[-1]
+    return impl_name.rpartition(">")[0]
+
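As a hedged illustration of what this helper does, relying only on the "<function NAME>" stringification the comment above describes (`_my_test_impl` is a made-up name):

```
def _my_test_impl(ctx):
    pass

# str(_my_test_impl) currently yields "<function _my_test_impl>", so:
# _impl_function_name(_my_test_impl) == "_my_test_impl"
```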
 def _make(impl, attrs = None):
     """Creates a unit test rule from its implementation function.
@@ -94,18 +108,8 @@ def _make(impl, attrs = None):
       A rule definition that should be stored in a global whose name ends in
       `_test`.
     """
-
-    # Derive the name of the implementation function for better test feedback.
-    # Starlark currently stringifies a function as "<function NAME>", so we use
-    # that knowledge to parse the "NAME" portion out. If this behavior ever
-    # changes, we'll need to update this.
-    # TODO(bazel-team): Expose a ._name field on functions to avoid this.
-    impl_name = str(impl)
-    impl_name = impl_name.partition("<function ")[-1]
-    impl_name = impl_name.rpartition(">")[0]
-
     attrs = dict(attrs) if attrs else {}
-    attrs["_impl_name"] = attr.string(default = impl_name)
+    attrs["_impl_name"] = attr.string(default = _impl_function_name(impl))
     return rule(
         impl,
@@ -115,6 +119,69 @@ def _make(impl, attrs = None):
         toolchains = [TOOLCHAIN_TYPE],
     )
+# TODO(cparsons): Provide more complete documentation on analysis testing in README.
+def _make_analysis_test(impl, expect_failure = False, config_settings = {}):
+    """Creates an analysis test rule from its implementation function.
+
+    An analysis test verifies the behavior of a "real" rule target by examining
+    and asserting on the providers given by the real target.
+
+    Each analysis test is defined in an implementation function that must then be
+    associated with a rule so that a target can be built. This function handles
+    the boilerplate to create and return a test rule and captures the
+    implementation function's name so that it can be printed in test feedback.
+
+    An example of an analysis test:
+
+    ```
+    def _your_test(ctx):
+        env = analysistest.begin(ctx)
+
+        # Assert statements go here
+
+        return analysistest.end(env)
+
+    your_test = analysistest.make(_your_test)
+    ```
+
+    Recall that names of test rules must end in `_test`.
+
+    Args:
+      impl: The implementation function of the analysis test.
+      expect_failure: If true, the analysis test will expect the target_under_test
+          to fail. Assertions can be made on the underlying failure using
+          `asserts.expect_failure`.
+      config_settings: A dictionary of configuration settings to change for the
+          target under test and its dependencies. This may be used to essentially
+          change "build flags" for the target under test, and thus to test
+          multiple targets with different flags in a single build.
+
+    Returns:
+      A rule definition that should be stored in a global whose name ends in
+      `_test`.
+    """
+    attrs = {}
+    attrs["_impl_name"] = attr.string(default = _impl_function_name(impl))
+
+    changed_settings = dict(config_settings)
+    if expect_failure:
+        changed_settings["//command_line_option:allow_analysis_failures"] = "True"
+
+    if changed_settings:
+        test_transition = analysis_test_transition(
+            settings = changed_settings,
+        )
+        attrs["target_under_test"] = attr.label(cfg = test_transition, mandatory = True)
+    else:
+        attrs["target_under_test"] = attr.label(mandatory = True)
+
+    return rule(
+        impl,
+        attrs = attrs,
+        test = True,
+        toolchains = [TOOLCHAIN_TYPE],
+        analysis_test = True,
+    )
+
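A sketch of how the two optional parameters might be combined; the flag key and value here are placeholder choices, not mandated by this API:

```
# expect_failure flips //command_line_option:allow_analysis_failures for the
# target under test; config_settings may override other build flags as well.
your_failure_test = analysistest.make(
    _your_failure_test,
    expect_failure = True,
    config_settings = {
        "//command_line_option:compilation_mode": "dbg",
    },
)
```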
 def _suite(name, *test_rules):
     """Defines a `test_suite` target that contains multiple tests.
@@ -189,10 +256,24 @@ def _begin(ctx):
     """
     return struct(ctx = ctx, failures = [])
+def _end_analysis_test(env):
+    """Ends an analysis test and logs the results.
+
+    This must be called and returned at the end of an analysis test implementation
+    function so that the results are reported.
+
+    Args:
+      env: The test environment returned by `analysistest.begin`.
+    """
+    return [AnalysisTestResultInfo(
+        success = (len(env.failures) == 0),
+        message = "\n".join(env.failures),
+    )]
+
 def _end(env):
     """Ends a unit test and logs the results.
-    This must be called before the end of a unit test implementation function so
+    This must be called and returned at the end of a unit test implementation function so
     that the results are reported.
     Args:
@@ -308,7 +389,31 @@ def _assert_new_set_equals(env, expected, actual, msg = None):
         full_msg = expectation_msg
     _fail(env, full_msg)
+def _expect_failure(env, expected_failure_msg = ""):
+    """Asserts that the target under test has failed with a given error message.
+
+    This requires that the analysis test is created with `analysistest.make()` and
+    `expect_failure = True` is specified.
+
+    Args:
+      env: The test environment returned by `analysistest.begin`.
+      expected_failure_msg: The error message to expect as a result of analysis failures.
+    """
+    dep = getattr(env.ctx.attr, "target_under_test")[0]
+    if AnalysisFailureInfo in dep:
+        dep_failure = dep[AnalysisFailureInfo]
+        actual_errors = ""
+        for cause in dep_failure.causes.to_list():
+            actual_errors += cause.message + "\n"
+        if actual_errors.find(expected_failure_msg) < 0:
+            expectation_msg = "Expected errors to contain '%s' but did not. " % expected_failure_msg
+            expectation_msg += "Actual errors: %s" % actual_errors
+            _fail(env, expectation_msg)
+    else:
+        _fail(env, "Expected failure of target_under_test, but found success")
+
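Paired with `expect_failure = True` in `analysistest.make`, an implementation function might use this assertion as follows (the message is a placeholder; matching is by substring, per the `find` call above):

```
def _your_failure_test(ctx):
    env = analysistest.begin(ctx)
    # Passes only if some analysis-failure cause message contains this substring.
    asserts.expect_failure(env, "some substring of the expected error")
    return analysistest.end(env)
```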
 asserts = struct(
+    expect_failure = _expect_failure,
     equals = _assert_equals,
     false = _assert_false,
     set_equals = _assert_set_equals,
@@ -323,3 +428,10 @@ unittest = struct(
     end = _end,
     fail = _fail,
 )
+
+analysistest = struct(
+    make = _make_analysis_test,
+    begin = _begin,
+    end = _end_analysis_test,
+    fail = _fail,
+)
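Finally, since `target_under_test` is a mandatory label attribute on every rule returned by `analysistest.make`, instantiating a test in a BUILD file would look roughly like this (names are placeholders):

```
# In a BUILD file:
your_test(
    name = "your_test",
    target_under_test = ":some_real_target",
)
```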