author     Evelina Dumitrescu <evelinad@google.com>   2016-09-16 15:50:42 -0700
committer  chrome-bot <chrome-bot@chromium.org>       2017-02-01 18:13:42 -0800
commit     9c4762d55252fe48671766435e01d5f21bf4719f (patch)
tree       9a34429db662040e1a207877b80290fe9e17fc83
parent     e9895d0f43b3078b847b8d495a7c1e8ae309d382 (diff)
download   toolchain-utils-9c4762d55252fe48671766435e01d5f21bf4719f.tar.gz
user_activity: Added unit tests for benchmark metrics.
BUG=None
TEST=None

Change-Id: I792f03a4cb76acf18691eeb1455a16a2d65537cf
Reviewed-on: https://chrome-internal-review.googlesource.com/287960
Tested-by: Evelina Dumitrescu <evelinad@google.com>
Reviewed-by: George Burgess <gbiv@google.com>
Reviewed-by: Ting-Yuan Huang <laszio@google.com>
Reviewed-by: Evelina Dumitrescu <evelinad@google.com>
Reviewed-on: https://chromium-review.googlesource.com/435908
Commit-Ready: Luis Lozano <llozano@chromium.org>
Tested-by: Luis Lozano <llozano@chromium.org>
Reviewed-by: Luis Lozano <llozano@chromium.org>
-rwxr-xr-x  user_activity_benchmarks/benchmark_metrics_unittest.py | 87
1 file changed, 87 insertions, 0 deletions
diff --git a/user_activity_benchmarks/benchmark_metrics_unittest.py b/user_activity_benchmarks/benchmark_metrics_unittest.py
new file mode 100755
index 00000000..a48361fe
--- /dev/null
+++ b/user_activity_benchmarks/benchmark_metrics_unittest.py
@@ -0,0 +1,87 @@
+#!/usr/bin/python2
+
+# Copyright 2016 The Chromium OS Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+"""Unit tests for the benchmark_metrics module."""
+
+import mock
+import unittest
+import benchmark_metrics
+
+
+class MetricsComputationTest(unittest.TestCase):
+ """Test class for MetricsComputation class."""
+
+ def __init__(self, *args, **kwargs):
+ super(MetricsComputationTest, self).__init__(*args, **kwargs)
+
+ def testComputeDistanceForFunction(self):
+ child_functions_statistics_sample = {
+ 'f,file_f': 0.1,
+ 'g,file_g': 0.2,
+ 'h,file_h': 0.3,
+ 'i,file_i': 0.4
+ }
+ child_functions_statistics_reference = {
+ 'f,file_f': 0.4,
+ 'i,file_i': 0.4,
+ 'h,file_h2': 0.2
+ }
+ distance = benchmark_metrics.ComputeDistanceForFunction(
+ child_functions_statistics_sample, child_functions_statistics_reference)
+ self.assertEqual(distance, 2.0)
+
+ distance = benchmark_metrics.ComputeDistanceForFunction({}, {})
+ self.assertEqual(distance, 1.0)
+
+ distance = benchmark_metrics.ComputeDistanceForFunction(
+ child_functions_statistics_sample, {})
+ self.assertEqual(distance, 2.0)
+
+ distance = benchmark_metrics.ComputeDistanceForFunction(
+ {}, child_functions_statistics_reference)
+ self.assertEqual(distance, 2.0)
+
+ def testComputeScoreForFunction(self):
+ score = benchmark_metrics.ComputeScoreForFunction(1.2, 0.3, 0.4)
+ self.assertEqual(score, 0.1)
+
+ def testComputeMetricsForComponents(self):
+ function_metrics = {
+ 'func_f,/a/b/file_f': (1.0, 2.3),
+ 'func_g,/a/b/file_g': (1.1, 1.5),
+ 'func_h,/c/d/file_h': (2.0, 1.7),
+ 'func_i,/c/d/file_i': (1.9, 1.8),
+ 'func_j,/c/d/file_j': (1.8, 1.9),
+ 'func_k,/e/file_k': (1.2, 2.1),
+ 'func_l,/e/file_l': (1.3, 3.1)
+ }
+ cwp_function_groups = [('ab', '/a/b'), ('cd', '/c/d'), ('e', '/e')]
+ expected_metrics = {'ab': ('/a/b', 2.0, 2.1, 1.05, 3.8, 1.9),
+ 'e': ('/e', 2.0, 2.5, 1.25, 5.2, 2.6),
+ 'cd': ('/c/d', 3.0, 5.7, 1.9000000000000001, 5.4, 1.8)}
+ result_metrics = benchmark_metrics.ComputeMetricsForComponents(
+ cwp_function_groups, function_metrics)
+
+ self.assertDictEqual(expected_metrics, result_metrics)
+
+ def testComputeMetricsForBenchmark(self):
+ function_metrics = {'func_f': (1.0, 2.0),
+ 'func_g': (1.1, 2.1),
+ 'func_h': (1.2, 2.2),
+ 'func_i': (1.3, 2.3)}
+ expected_benchmark_metrics = \
+ (4, 4.6000000000000005, 1.1500000000000001, 8.6, 2.15)
+ result_benchmark_metrics = \
+ benchmark_metrics.ComputeMetricsForBenchmark(function_metrics)
+
+ self.assertEqual(expected_benchmark_metrics, result_benchmark_metrics)
+
+ def testComputeMetricsForBenchmarkSet(self):
+ """TODO(evelinad): Add unit test for ComputeMetricsForBenchmarkSet."""
+ pass
+
+
+if __name__ == '__main__':
+ unittest.main()
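
For reference, the behavior these assertions pin down can be reconstructed from the expected values alone. The sketch below is an assumption inferred from the test cases, not the actual toolchain-utils benchmark_metrics implementation; only the function names and call shapes come from the patch, and the argument names are hypothetical.

# Hypothetical reconstruction of benchmark_metrics, inferred from the
# unit test expectations above (Python 2, matching the test file).


def ComputeDistanceForFunction(child_functions_statistics_sample,
                               child_functions_statistics_reference):
  # A baseline of 1.0 (the distance of two empty or identical child
  # profiles), plus the absolute difference of the fractions over the
  # union of child functions. For the first test case:
  # 1.0 + (0.3 + 0.2 + 0.3 + 0.0 + 0.2) == 2.0.
  distance = 1.0
  all_children = (set(child_functions_statistics_sample) |
                  set(child_functions_statistics_reference))
  for child in all_children:
    sample_fraction = child_functions_statistics_sample.get(child, 0.0)
    reference_fraction = child_functions_statistics_reference.get(child, 0.0)
    distance += abs(sample_fraction - reference_fraction)
  return distance


def ComputeScoreForFunction(distance, reference_fraction, sample_fraction):
  # 0.3 * 0.4 / 1.2 == 0.1, matching testComputeScoreForFunction.
  return reference_fraction * sample_fraction / distance


def ComputeMetricsForComponents(cwp_function_groups, function_metrics):
  # Bucket each 'function,file' key into the group whose path prefixes
  # the file, then aggregate every group into
  # (path, count, distance sum, distance average, score sum, score average).
  component_metrics = {}
  for group_name, group_path in cwp_function_groups:
    distances = []
    scores = []
    for key in function_metrics:
      file_name = key.split(',')[1]
      if file_name.startswith(group_path):
        distance, score = function_metrics[key]
        distances.append(distance)
        scores.append(score)
    if distances:
      count = float(len(distances))
      component_metrics[group_name] = (group_path, count, sum(distances),
                                       sum(distances) / count, sum(scores),
                                       sum(scores) / count)
  return component_metrics


def ComputeMetricsForBenchmark(function_metrics):
  # The same aggregation over all functions:
  # (count, distance sum, distance average, score sum, score average).
  count = len(function_metrics)
  distances = [metrics[0] for metrics in function_metrics.itervalues()]
  scores = [metrics[1] for metrics in function_metrics.itervalues()]
  return (count, sum(distances), sum(distances) / count,
          sum(scores), sum(scores) / count)

Note that expected values such as 4.6000000000000005 and 1.9000000000000001 bake IEEE 754 rounding into the assertions, so any reconstruction must accumulate the sums in the same order for assertEqual and assertDictEqual to pass exactly. The tests run standalone via the unittest.main() entry point, e.g. python2 benchmark_metrics_unittest.py from the user_activity_benchmarks directory.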