author    Zhizhou Yang <zhizhouy@google.com>  2019-03-14 13:25:06 -0700
committer chrome-bot <chrome-bot@chromium.org>  2019-03-15 16:48:42 -0700
commit    1a5a3163b7fe75e053282e7d67148c5f4e409414 (patch)
tree      f2c94f394dcc3ec171a41b492ba533f4121a1be0 /cros_utils/tabulator_test.py
parent    6512f0baf7a540267b735accac77cf6903a63306 (diff)
download  toolchain-utils-1a5a3163b7fe75e053282e7d67148c5f4e409414.tar.gz
crosperf: Feature to ignore min and max value in results
This patch provides an option for the user to ignore the min and max
values in the results when generating a report. The user can enable this
feature by specifying `ignore_min_max: True` (False by default) in the
experiment file. When a single test has fewer than 3 values, the option
is automatically ignored for that test and a warning is printed.

BUG=chromium:938758
TEST=Tested with examples in general/cwp mode report generation.
Passed all unit tests.

Change-Id: I36a4c4d99836c201cdd2f2f9f2a4b1a4ffdaa47d
Reviewed-on: https://chromium-review.googlesource.com/1521054
Commit-Ready: Zhizhou Yang <zhizhouy@google.com>
Tested-by: Zhizhou Yang <zhizhouy@google.com>
Reviewed-by: Zhizhou Yang <zhizhouy@google.com>
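For reference, here is a minimal standalone sketch (not the actual
tabulator.AmeanResult implementation) of the averaging behaviour the
commit message describes and the new testIgnoreMinMax case below
exercises: when ignore_min_max is requested and at least three values
are present, the smallest and largest values are dropped before the
arithmetic mean is computed; with fewer than three values the option is
ignored. The helper name is illustrative only.

def amean_ignoring_min_max(values, ignore_min_max=True):
  # Drop the smallest and largest value only when at least three values
  # are present; otherwise fall back to the plain arithmetic mean.
  vals = list(values)
  if ignore_min_max and len(vals) > 2:
    vals.remove(min(vals))
    vals.remove(max(vals))
  return float(sum(vals)) / len(vals)

assert amean_ignoring_min_max([1, 2]) == 1.5     # fewer than 3 values: kept as-is
assert amean_ignoring_min_max([1, 2, 8]) == 2.0  # 1 and 8 dropped, mean of [2]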
Diffstat (limited to 'cros_utils/tabulator_test.py')
-rw-r--r--  cros_utils/tabulator_test.py  75
1 file changed, 32 insertions(+), 43 deletions(-)
diff --git a/cros_utils/tabulator_test.py b/cros_utils/tabulator_test.py
index 943d9349..33c8da25 100644
--- a/cros_utils/tabulator_test.py
+++ b/cros_utils/tabulator_test.py
@@ -73,20 +73,22 @@ class TabulatorTest(unittest.TestCase):
b = tabulator.Result()._GetGmean(a)
self.assertTrue(b >= 0.99e+308 and b <= 1.01e+308)
+ def testIgnoreMinMax(self):
+ amr = tabulator.AmeanResult(ignore_min_max=True)
+ cell = tabulator.Cell()
+ values = [1, 2]
+ amr.Compute(cell, values, None)
+ self.assertTrue(cell.value == 1.5)
+ values = [1, 2, 8]
+ amr.Compute(cell, values, None)
+ self.assertTrue(cell.value == 2)
+
def testTableGenerator(self):
- runs = [[{
- 'k1': '10',
- 'k2': '12'
- }, {
- 'k1': '13',
- 'k2': '14',
- 'k3': '15'
- }], [{
- 'k1': '50',
- 'k2': '51',
- 'k3': '52',
- 'k4': '53'
- }]]
+ # yapf: disable
+ runs = [[{'k1': '10', 'k2': '12'},
+ {'k1': '13', 'k2': '14', 'k3': '15'}],
+ [{'k1': '50', 'k2': '51', 'k3': '52', 'k4': '53'}]]
+ # yapf: enable
labels = ['vanilla', 'modified']
tg = tabulator.TableGenerator(runs, labels)
table = tg.GetTable()
@@ -113,24 +115,14 @@ class TabulatorTest(unittest.TestCase):
self.assertTrue(table)
def testSamplesTableGenerator(self):
+ # yapf: disable
keyvals = {
- 'bench1': [[{
- 'samples': 1
- }, {
- 'samples': 2
- }], [{
- 'samples': 3
- }, {
- 'samples': 4
- }]],
- 'bench2': [[{
- 'samples': 5
- }, {}], [{
- 'samples': 6
- }, {
- 'samples': 7
- }]]
+ 'bench1': [[{'samples': 1}, {'samples': 2}],
+ [{'samples': 3}, {'samples': 4}]],
+ 'bench2': [[{'samples': 5}, {}],
+ [{'samples': 6}, {'samples': 7}]]
}
+ # yapf: enable
weights = {'bench1': 0.2, 'bench2': 0.7}
iter_counts = {'bench1': 2, 'bench2': 2}
labels = ['vanilla', 'modified']
@@ -152,22 +144,19 @@ class TabulatorTest(unittest.TestCase):
header = table.pop(0)
self.assertTrue(header == ['Benchmarks', 'Weights', 'vanilla', 'modified'])
row = table.pop(0)
- self.assertTrue(row == [
- 'bench1', 0.2, ((2, 0), [1 * 0.2, 2 * 0.2]), ((2, 0),
- [3 * 0.2, 4 * 0.2])
- ])
+ # yapf: disable
+ self.assertTrue(row == ['bench1', 0.2,
+ ((2, 0), [1 * 0.2, 2 * 0.2]),
+ ((2, 0), [3 * 0.2, 4 * 0.2])])
row = table.pop(0)
- self.assertTrue(row == [
- 'bench2', 0.7, ((1, 1), [5 * 0.7, None]), ((2, 0), [6 * 0.7, 7 * 0.7])
- ])
+ self.assertTrue(row == ['bench2', 0.7,
+ ((1, 1), [5 * 0.7, None]),
+ ((2, 0), [6 * 0.7, 7 * 0.7])])
row = table.pop(0)
- self.assertTrue(row == [
- 'Composite Benchmark (samples)', 'N/A',
- ((1, 1),
- [1 * 0.2 +
- 5 * 0.7, None]), ((2, 0), [3 * 0.2 + 6 * 0.7, 4 * 0.2 + 7 * 0.7])
- ])
-
+ self.assertTrue(row == ['Composite Benchmark (samples)', 'N/A',
+ ((1, 1), [1 * 0.2 + 5 * 0.7, None]),
+ ((2, 0), [3 * 0.2 + 6 * 0.7, 4 * 0.2 + 7 * 0.7])])
+ # yapf: enable
self.assertTrue('Composite Benchmark' in new_keyvals.keys())
self.assertTrue('Composite Benchmark' in new_iter_counts.keys())