diff options
author | Luis Lozano <llozano@chromium.org> | 2015-12-08 10:08:19 -0800 |
---|---|---|
committer | chrome-bot <chrome-bot@chromium.org> | 2015-12-08 20:17:22 +0000 |
commit | 1489d6440dc37585f3bb9325f3d12fbaa5a37b2b (patch) | |
tree | fd0559154d5c0e6f4a8f77b99590a08a11e68089 | |
parent | 3f43271ed61442fce6833ae118fe228a7ef76116 (diff) | |
download | toolchain-utils-1489d6440dc37585f3bb9325f3d12fbaa5a37b2b.tar.gz |
Re-enable graphics_WebGLAquarium from buildbots testing.
With some recent changes to the test_that invocation, this should work
now.
There is still an issue with benchmarks that require compilation
for the particular board; that remains to be fixed.
BUG=chromium:551710
TEST=None. This is a revert.
Change-Id: I547a4853302f11dc58ddc493ee5368b1e2d33c0e
Reviewed-on: https://chrome-internal-review.googlesource.com/241035
Commit-Ready: Luis Lozano <llozano@chromium.org>
Tested-by: Luis Lozano <llozano@chromium.org>
Reviewed-by: Caroline Tice <cmtice@google.com>
-rwxr-xr-x | buildbot_test_toolchains.py | 2 | ||||
-rw-r--r-- | crosperf/experiment_factory.py | 5 |
2 files changed, 1 insertion, 6 deletions
diff --git a/buildbot_test_toolchains.py b/buildbot_test_toolchains.py index a2c9644d..d4a99814 100755 --- a/buildbot_test_toolchains.py +++ b/buildbot_test_toolchains.py @@ -114,7 +114,7 @@ class ToolchainComparator(): retries: 1 """ % (self._board, self._remotes) experiment_tests = """ - benchmark: all_toolchain_perf_pure_telemetry { + benchmark: all_toolchain_perf { suite: telemetry_Crosperf iterations: 3 } diff --git a/crosperf/experiment_factory.py b/crosperf/experiment_factory.py index c7048264..99e305aa 100644 --- a/crosperf/experiment_factory.py +++ b/crosperf/experiment_factory.py @@ -186,11 +186,6 @@ class ExperimentFactory(object): rm_chroot_tmp, perf_args, '', show_all_results, retries, run_local=False)) - elif test_name == 'all_toolchain_perf_pure_telemetry': - self._AppendBenchmarkSet(benchmarks, telemetry_toolchain_perf_tests, - test_args, iterations, rm_chroot_tmp, - perf_args, suite, show_all_results, retries, - run_local) elif test_name == 'all_toolchain_perf_old': self._AppendBenchmarkSet(benchmarks, telemetry_toolchain_old_perf_tests, |