Diffstat (limited to 'user_activity_benchmarks/benchmark_metrics_experiment_unittest.py')
-rwxr-xr-x | user_activity_benchmarks/benchmark_metrics_experiment_unittest.py | 132
1 file changed, 132 insertions, 0 deletions
diff --git a/user_activity_benchmarks/benchmark_metrics_experiment_unittest.py b/user_activity_benchmarks/benchmark_metrics_experiment_unittest.py
new file mode 100755
index 00000000..81d2d0e4
--- /dev/null
+++ b/user_activity_benchmarks/benchmark_metrics_experiment_unittest.py
@@ -0,0 +1,132 @@
+#!/usr/bin/python2
+
+# Copyright 2016 The Chromium OS Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+"""Unit tests for the benchmark_metrics_experiment module."""
+
+from benchmark_metrics_experiment import MetricsExperiment
+
+import mock
+import os
+import tempfile
+import unittest
+
+
+class MetricsExperimentTest(unittest.TestCase):
+  """Test class for MetricsExperiment class."""
+
+  def __init__(self, *args, **kwargs):
+    super(MetricsExperimentTest, self).__init__(*args, **kwargs)
+    self._pairwise_inclusive_count_test_file = \
+        'testdata/input/pairwise_inclusive_count_test.csv'
+    self._pairwise_inclusive_count_reference_file = \
+        'testdata/input/pairwise_inclusive_count_reference.csv'
+    self._inclusive_count_test_file = \
+        'testdata/input/inclusive_count_test.csv'
+    self._inclusive_count_reference_file = \
+        'testdata/input/inclusive_count_reference.csv'
+    self._cwp_function_groups_file = \
+        'testdata/input/cwp_function_groups'
+
+  def testParseInclusiveStatisticsFile(self):
+    expected_inclusive_statistics_test = {
+        'func_f,/a/b/file_f': 2.3,
+        'func_g,/a/b/file_g': 2.2,
+        'func_h,/c/d/file_h': 3.3,
+        'func_i,/c/d/file_i': 4.4,
+        'func_j,/e/file_j': 5.5,
+        'func_k,/e/file_k': 6.6
+    }
+    expected_inclusive_statistics_reference = {
+        'func_f,/a/b/file_f': 1.0,
+        'func_g,/a/b/file_g': 4.4,
+        'func_h,/c/d/file_h': 3.0,
+        'func_i,/c/d/file_i': 4.0,
+        'func_j,/e/file_j': 5.0,
+        'func_l,/e/file_l': 6.0
+    }
+    result_inclusive_statistics_test = \
+        MetricsExperiment.ParseInclusiveStatisticsFile(
+            self._inclusive_count_test_file)
+    result_inclusive_statistics_reference = \
+        MetricsExperiment.ParseInclusiveStatisticsFile(
+            self._inclusive_count_reference_file)
+    self.assertEqual(result_inclusive_statistics_test,
+                     expected_inclusive_statistics_test)
+    self.assertEqual(result_inclusive_statistics_reference,
+                     expected_inclusive_statistics_reference)
+
+  def testParsePairwiseInclusiveStatisticsFile(self):
+    expected_pairwise_inclusive_statistics_test = {
+        'func_f': {'func_g,/a/b/file_g2': 0.01,
+                   'func_h,/c/d/file_h': 0.02,
+                   'func_i,/c/d/file_i': 0.03},
+        'func_g': {'func_j,/e/file_j': 0.4,
+                   'func_m,/e/file_m': 0.6}
+    }
+    expected_pairwise_inclusive_statistics_reference = {
+        'func_f': {'func_g,/a/b/file_g': 0.1,
+                   'func_h,/c/d/file_h': 0.2,
+                   'func_i,/c/d/file_i': 0.3},
+        'func_g': {'func_j,/e/file_j': 0.4}
+    }
+    result_pairwise_inclusive_statistics_test = \
+        MetricsExperiment.ParsePairwiseInclusiveStatisticsFile(
+            self._pairwise_inclusive_count_test_file)
+    result_pairwise_inclusive_statistics_reference = \
+        MetricsExperiment.ParsePairwiseInclusiveStatisticsFile(
+            self._pairwise_inclusive_count_reference_file)
+    self.assertEqual(result_pairwise_inclusive_statistics_test,
+                     expected_pairwise_inclusive_statistics_test)
+    self.assertEqual(result_pairwise_inclusive_statistics_reference,
+                     expected_pairwise_inclusive_statistics_reference)
+
+  def _CheckFileContents(self, file_name, expected_content_lines):
+    with open(file_name, 'r') as input_file:
+      result_content_lines = input_file.readlines()
+      self.assertListEqual(expected_content_lines, result_content_lines)
+
+  def testExperiment(self):
+    group_statistics_file, group_statistics_filename = tempfile.mkstemp()
+
+    os.close(group_statistics_file)
+
+    function_statistics_file, function_statistics_filename = tempfile.mkstemp()
+
+    os.close(function_statistics_file)
+
+    expected_group_statistics_lines = \
+        ['group,file_path,function_count,distance_cum,distance_avg,score_cum,'
+         'score_avg\n',
+         'ab,/a/b,2.0,3.16,1.58,7.52435897436,3.76217948718\n',
+         'e,/e,2.0,2.0,1.0,27.5,13.75\n',
+         'cd,/c/d,2.0,2.0,1.0,27.5,13.75']
+    expected_function_statistics_lines = \
+        ['function,file,distance,score\n',
+         'func_i,/c/d/file_i,1.0,17.6\n',
+         'func_j,/e/file_j,1.0,27.5\n',
+         'func_f,/a/b/file_f,1.56,1.47435897436\n',
+         'func_h,/c/d/file_h,1.0,9.9\n',
+         'func_k,/e/file_k,1.0,0.0\n',
+         'func_g,/a/b/file_g,1.6,6.05']
+    metric_experiment = \
+        MetricsExperiment(self._pairwise_inclusive_count_reference_file,
+                          self._pairwise_inclusive_count_test_file,
+                          self._inclusive_count_reference_file,
+                          self._inclusive_count_test_file,
+                          self._cwp_function_groups_file,
+                          group_statistics_filename,
+                          function_statistics_filename)
+
+    metric_experiment.PerformComputation()
+    self._CheckFileContents(group_statistics_filename,
+                            expected_group_statistics_lines)
+    self._CheckFileContents(function_statistics_filename,
+                            expected_function_statistics_lines)
+    os.remove(group_statistics_filename)
+    os.remove(function_statistics_filename)
+
+
+if __name__ == '__main__':
+  unittest.main()
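A minimal sketch of how this suite might be exercised outside of `unittest.main()`, assuming a Python 2 environment with the mock package installed and the testdata/input fixtures present relative to the working directory; the module and class names are taken from the diff above, everything else here is illustrative:

#!/usr/bin/python2
# Sketch: load and run MetricsExperimentTest with the stock unittest runner.
import unittest

from benchmark_metrics_experiment_unittest import MetricsExperimentTest

# Build a suite from the test case and run it with verbose per-test output.
suite = unittest.TestLoader().loadTestsFromTestCase(MetricsExperimentTest)
unittest.TextTestRunner(verbosity=2).run(suite)

Since the fixture paths in the test are relative ('testdata/input/...'), such a runner would need to be started from the user_activity_benchmarks directory for the parse tests to find their input CSVs.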