author     cmtice <cmtice@google.com>  2014-04-09 10:58:51 -0700
committer  chrome-internal-fetch <chrome-internal-fetch@google.com>  2014-04-11 20:01:27 +0000
commit     c454cee542ca459ef9bd87c9f72e81c822caf1e5 (patch)
tree       39a5c1bdc6c53073e3589da913cef3790f816e7f /crosperf
parent     0537956ef5981f4069fcaaefd3d25298f9d6ebd8 (diff)
download   toolchain-utils-c454cee542ca459ef9bd87c9f72e81c822caf1e5.tar.gz
Update unittests to all pass.
Fix the parameters to the various unittests so that they match recent
changes and the unittests all pass again.

BUG=None
TEST=I ran all the unittests with the changes.

Change-Id: I083b5127a2ade8f1dbaf2bb173d82183871cb7c7
Reviewed-on: https://chrome-internal-review.googlesource.com/159915
Reviewed-by: Yunlian Jiang <yunlian@google.com>
Commit-Queue: Caroline Tice <cmtice@google.com>
Tested-by: Caroline Tice <cmtice@google.com>
Diffstat (limited to 'crosperf')
-rw-r--r--  crosperf/benchmark_run.py                2
-rwxr-xr-x  crosperf/benchmark_run_unittest.py      25
-rw-r--r--  crosperf/machine_manager.py              3
-rwxr-xr-x  crosperf/machine_manager_unittest.py     5
-rwxr-xr-x  crosperf/results_cache_unittest.py      11
-rwxr-xr-x  crosperf/results_organizer_unittest.py  18
6 files changed, 36 insertions(+), 28 deletions(-)
diff --git a/crosperf/benchmark_run.py b/crosperf/benchmark_run.py
index 7e615430..da4b7d29 100644
--- a/crosperf/benchmark_run.py
+++ b/crosperf/benchmark_run.py
@@ -243,7 +243,7 @@ class MockBenchmarkRun(BenchmarkRun):
                                           self.test_args,
                                           self.profiler_args)
     self.run_completed = True
-    rr = MockResult("logger", self.label)
+    rr = MockResult("logger", self.label, self.log_level)
     rr.out = out
     rr.err = err
     rr.retval = retval
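
The change above threads the runner's log level into the mock result object. A minimal, self-contained sketch of that constructor-forwarding pattern (the real Result class lives in results_cache.py; the stand-in below only models the (logger, label, log_level) signature this hunk implies):

    class Result(object):
      # Stand-in for crosperf's Result; only models the signature
      # (logger, label, log_level) that this diff implies.
      def __init__(self, logger, label, log_level):
        self._logger = logger
        self.label = label
        self.log_level = log_level


    class MockResult(Result):
      # Forwards every constructor argument to the base class, as the
      # updated MockBenchmarkRun call site now expects.
      def __init__(self, logger, label, log_level):
        super(MockResult, self).__init__(logger, label, log_level)


    rr = MockResult("logger", "some_label", "average")
    assert rr.log_level == "average"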
diff --git a/crosperf/benchmark_run_unittest.py b/crosperf/benchmark_run_unittest.py
index 4e52e538..d493678d 100755
--- a/crosperf/benchmark_run_unittest.py
+++ b/crosperf/benchmark_run_unittest.py
@@ -21,20 +21,22 @@ from results_cache import MockResultsCache
 class BenchmarkRunTest(unittest.TestCase):
   def testDryRun(self):
     my_label = MockLabel("test1", "image1", "/tmp/test_benchmark_run",
-                         "x86-alex", "chromeos-alex1",
+                         "x86-alex", "chromeos2-row1-rack4-host9.cros",
                          image_args="",
                          image_md5sum="",
                          cache_dir="")
-    m = MockMachineManager("/tmp/chromeos_root", 0)
-    m.AddMachine("chromeos-alex1")
-    bench = Benchmark("PageCyler",
-                      "Pyautoperf",
-                      "",
-                      1,
-                      0.2,
-                      False,
-                      False,
-                      "")
+    logging_level = "average"
+    m = MockMachineManager("/tmp/chromeos_root", 0, logging_level)
+    m.AddMachine("chromeos2-row1-rack4-host9.cros")
+    bench = Benchmark("page_cycler.netsim.top_10",  # name
+                      "page_cycler.netsim.top_10",  # test_name
+                      "",                           # test_args
+                      1,                            # iterations
+                      0.2,                          # outlier_range
+                      False,                        # key_results_only
+                      False,                        # rm_chroot_tmp
+                      "",                           # perf_args
+                      suite="telemetry_Crosperf")   # suite
     b = MockBenchmarkRun("test run",
                          bench,
                          my_label,
@@ -42,6 +44,7 @@ class BenchmarkRunTest(unittest.TestCase):
                          [],
                          m,
                          logger.GetLogger(),
+                         logging_level,
                          "")
     b.cache = MockResultsCache()
     b.suite_runner = MockSuiteRunner()
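
The Benchmark call above labels its positional arguments with inline comments. A hypothetical sketch of the same construction using keyword arguments, which would keep call sites readable through signature changes like this one (the field names are taken from the comments in this hunk, not verified against the real benchmark.py):

    import collections

    # Hypothetical stand-in for crosperf's Benchmark so the sketch runs
    # standalone; field names come from the inline comments in this diff.
    Benchmark = collections.namedtuple(
        "Benchmark",
        ["name", "test_name", "test_args", "iterations", "outlier_range",
         "key_results_only", "rm_chroot_tmp", "perf_args", "suite"])

    bench = Benchmark(name="page_cycler.netsim.top_10",
                      test_name="page_cycler.netsim.top_10",
                      test_args="",
                      iterations=1,
                      outlier_range=0.2,
                      key_results_only=False,
                      rm_chroot_tmp=False,
                      perf_args="",
                      suite="telemetry_Crosperf")
    assert bench.suite == "telemetry_Crosperf"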
diff --git a/crosperf/machine_manager.py b/crosperf/machine_manager.py
index 0271df4c..52c3d818 100644
--- a/crosperf/machine_manager.py
+++ b/crosperf/machine_manager.py
@@ -457,6 +457,9 @@ class MockCrosMachine(CrosMachine):
     self.machine_checksum = self._GetMD5Checksum(self.checksum_string)
     self.log_level = log_level
 
+  def IsReachable(self):
+    return True
+
 
 class MockMachineManager(MachineManager):
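
The new IsReachable override is what lets these unittests run without real hardware: the mock machine always reports itself reachable instead of probing the device. A sketch of the pattern (the base-class probe shown here is an assumption; this diff does not show CrosMachine.IsReachable):

    class CrosMachine(object):
      # Stand-in for the real class in machine_manager.py; the actual
      # reachability check (assumed to probe over the network) is not
      # visible in this diff.
      def IsReachable(self):
        raise NotImplementedError("would probe the device over the network")


    class MockCrosMachine(CrosMachine):
      def IsReachable(self):
        # Tests never contact real hardware, so the mock always succeeds.
        return True


    m = MockCrosMachine()
    assert m.IsReachable()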
diff --git a/crosperf/machine_manager_unittest.py b/crosperf/machine_manager_unittest.py
index 84266d5e..10a34884 100755
--- a/crosperf/machine_manager_unittest.py
+++ b/crosperf/machine_manager_unittest.py
@@ -12,7 +12,7 @@ import machine_manager
 
 class MyMachineManager(machine_manager.MachineManager):
   def __init__(self, chromeos_root):
-    super(MyMachineManager, self).__init__(chromeos_root, 0)
+    super(MyMachineManager, self).__init__(chromeos_root, 0, "average")
 
   def _TryToLockMachine(self, cros_machine):
     self._machines.append(cros_machine)
@@ -22,7 +22,8 @@ class MyMachineManager(machine_manager.MachineManager):
     with self._lock:
       for m in self._all_machines:
         assert m.name != machine_name, "Tried to double-add %s" % machine_name
-      cm = machine_manager.MockCrosMachine(machine_name, self.chromeos_root)
+      cm = machine_manager.MockCrosMachine(machine_name, self.chromeos_root,
+                                           "average")
       assert cm.machine_checksum, ("Could not find checksum for machine %s" %
                                    machine_name)
       self._all_machines.append(cm)
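
The literal "average" now appears at every construction site in this test file. A hypothetical refactor that names the log level once, so a future rename is a one-line change (FakeMachine stands in for machine_manager.MockCrosMachine so the sketch runs standalone):

    # Hypothetical: hoist the repeated "average" literal into one constant.
    DEFAULT_LOG_LEVEL = "average"


    class FakeMachine(object):
      # Stand-in for machine_manager.MockCrosMachine, which takes
      # (name, chromeos_root, log_level) after this change.
      def __init__(self, name, chromeos_root, log_level):
        self.name = name
        self.chromeos_root = chromeos_root
        self.log_level = log_level


    cm = FakeMachine("chromeos2-row1-rack4-host9.cros", "/tmp/chromeos_root",
                     DEFAULT_LOG_LEVEL)
    assert cm.log_level == DEFAULT_LOG_LEVEL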
diff --git a/crosperf/results_cache_unittest.py b/crosperf/results_cache_unittest.py
index 2a2ac719..5f7fbc97 100755
--- a/crosperf/results_cache_unittest.py
+++ b/crosperf/results_cache_unittest.py
@@ -114,13 +114,13 @@ keyvals = {'': 'PASS', 'b_stdio_putcgetc__0_': '0.100005711667', 'b_string_strst
 
 class MockResult(Result):
-  def __init__(self, logger, label):
-    super(MockResult, self).__init__(logger, label)
+  def __init__(self, logger, label, logging_level):
+    super(MockResult, self).__init__(logger, label, logging_level)
 
   def _FindFilesInResultsDir(self, find_args):
     return ""
 
-  def _GetKeyvals(self):
+  def _GetKeyvals(self, show_all_results):
     return keyvals
 
@@ -128,8 +128,9 @@ class ResultTest(unittest.TestCase):
   mock_label = MockLabel("mock_label", "chromeos_image", "/tmp", "lumpy",
                          "remote", "image_args", "image_md5sum", "cache_dir")
 
   def testCreateFromRun(self):
-    result = MockResult.CreateFromRun(logger.GetLogger(), self.mock_label,
-                                      output, error, 0, 0)
+    result = MockResult.CreateFromRun(logger.GetLogger(), "average",
+                                      self.mock_label,
+                                      output, error, 0, True, 0)
     self.assertEqual(result.keyvals, keyvals)
     self.assertEqual(result.chroot_results_dir,
                      "/tmp/run_remote_tests.PO1234567/platform_LibCBench")
diff --git a/crosperf/results_organizer_unittest.py b/crosperf/results_organizer_unittest.py
index b17df82a..7cb09316 100755
--- a/crosperf/results_organizer_unittest.py
+++ b/crosperf/results_organizer_unittest.py
@@ -88,25 +88,25 @@ class ResultOrganizerTest(unittest.TestCase):
     benchmarks = [mock_instance.benchmark1, mock_instance.benchmark2]
     benchmark_runs = [None]*8
     benchmark_runs[0] = BenchmarkRun("b1", benchmarks[0],
-                                     labels[0], 1, "", "", "", "")
+                                     labels[0], 1, "", "", "", "average", "")
     benchmark_runs[1] = BenchmarkRun("b2", benchmarks[0],
-                                     labels[0], 2, "", "", "", "")
+                                     labels[0], 2, "", "", "", "average", "")
     benchmark_runs[2] = BenchmarkRun("b3", benchmarks[0],
-                                     labels[1], 1, "", "", "", "")
+                                     labels[1], 1, "", "", "", "average", "")
     benchmark_runs[3] = BenchmarkRun("b4", benchmarks[0],
-                                     labels[1], 2, "", "", "", "")
+                                     labels[1], 2, "", "", "", "average", "")
     benchmark_runs[4] = BenchmarkRun("b5", benchmarks[1],
-                                     labels[0], 1, "", "", "", "")
+                                     labels[0], 1, "", "", "", "average", "")
     benchmark_runs[5] = BenchmarkRun("b6", benchmarks[1],
-                                     labels[0], 2, "", "", "", "")
+                                     labels[0], 2, "", "", "", "average", "")
     benchmark_runs[6] = BenchmarkRun("b7", benchmarks[1],
-                                     labels[1], 1, "", "", "", "")
+                                     labels[1], 1, "", "", "", "average", "")
     benchmark_runs[7] = BenchmarkRun("b8", benchmarks[1],
-                                     labels[1], 2, "", "", "", "")
+                                     labels[1], 2, "", "", "", "average", "")
     i = 0
     for b in benchmark_runs:
-      b.result = Result("", b.label)
+      b.result = Result("", b.label, "average")
       b.result.keyvals = mock_instance.keyval[i]
       i += 1
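
The eight BenchmarkRun constructions differ only in name, benchmark, label, and iteration, so a loop would make the new "average" argument appear exactly once. A sketch of that refactor; BenchmarkRun is stubbed here so the snippet runs standalone, but in the test it would be the real class operating on the benchmarks and labels lists built above:

    # Hypothetical loop form of the eight constructions above. The stub
    # mirrors the argument order this diff shows: (name, benchmark, label,
    # iteration, then the string arguments ending in the log level).
    def BenchmarkRun(name, benchmark, label, iteration, *rest):
      return (name, benchmark, label, iteration) + rest

    benchmarks = ["benchmark1", "benchmark2"]
    labels = ["label1", "label2"]

    benchmark_runs = []
    n = 1
    for benchmark in benchmarks:
      for label in labels:
        for iteration in (1, 2):
          benchmark_runs.append(
              BenchmarkRun("b%d" % n, benchmark, label, iteration,
                           "", "", "", "average", ""))
          n += 1

    # Same b1..b8 ordering as the hand-written assignments above.
    assert len(benchmark_runs) == 8
    assert benchmark_runs[0][0] == "b1"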