about summary refs log tree commit diff
diff options
context:
space:
mode:
-rw-r--r--crosperf/results_cache.py81
-rwxr-xr-xcrosperf/results_cache_unittest.py152
-rw-r--r--crosperf/results_organizer.py4
3 files changed, 234 insertions, 3 deletions
diff --git a/crosperf/results_cache.py b/crosperf/results_cache.py
index 4db15901..178c0860 100644
--- a/crosperf/results_cache.py
+++ b/crosperf/results_cache.py
@@ -55,6 +55,7 @@ class Result(object):
self.perf_data_files = []
self.perf_report_files = []
self.results_file = []
+ self.turbostat_log_file = []
self.chrome_version = ''
self.err = None
self.chroot_results_dir = ''
@@ -65,6 +66,8 @@ class Result(object):
self.cwp_dso = ''
self.retval = None
self.out = None
+ self.cpufreq = []
+ self.cputemp = []
def CopyFilesTo(self, dest_dir, files_to_copy):
file_index = 0
@@ -245,7 +248,7 @@ class Result(object):
def FindFilesInResultsDir(self, find_args):
if not self.results_dir:
- return None
+ return ''
command = 'find %s %s' % (self.results_dir, find_args)
ret, out, _ = self.ce.RunCommandWOutput(command, print_to_console=False)
@@ -277,6 +280,9 @@ class Result(object):
self.FindFilesInResultsDir('-name results-chart.json').splitlines()
return result
+ def GetTurbostatFile(self):
+ return self.FindFilesInResultsDir('-name turbostat.log').split('\n')[0]
+
def _CheckDebugPath(self, option, path):
relative_path = path[1:]
out_chroot_path = os.path.join(self.chromeos_root, 'chroot', relative_path)
@@ -373,6 +379,7 @@ class Result(object):
self.perf_data_files = self.GetPerfDataFiles()
# Include all perf.report data in table.
self.perf_report_files = self.GeneratePerfReportFiles()
+ self.turbostat_log_file = self.GetTurbostatFile()
# TODO(asharif): Do something similar with perf stat.
# Grab keyvals from the directory.
@@ -416,6 +423,69 @@ class Result(object):
keyvals[keyname] = new_value
return keyvals
+ def ProcessTurbostatResults(self):
+ """Parse CPU statistics from the turbostat log file."""
+ if not self.turbostat_log_file:
+ self._logger.LogError('Turbostat output file not found.')
+ return {}
+
+ cpustats = {}
+ read_data = ''
+ with open(self.turbostat_log_file) as f:
+ read_data = f.readlines()
+
+ if not read_data:
+ self._logger.LogError('Turbostat output file is empty.')
+ return {}
+
+ # Gather CPU statistics from turbostat output.
+ # Example of the output.
+ # ----------------------
+ # CPU Avg_MHz Busy% Bzy_MHz TSC_MHz IRQ CoreTmp
+ # - 329 12.13 2723 2393 10975 77
+ # 0 336 12.41 2715 2393 6328 77
+ # 2 323 11.86 2731 2393 4647 69
+ # CPU Avg_MHz Busy% Bzy_MHz TSC_MHz IRQ CoreTmp
+ # - 1940 67.46 2884 2393 39920 83
+ # 0 1827 63.70 2877 2393 21184 83
+
+ # First line always contains the header.
+ stats = read_data[0].split()
+
+ # Mandatory parameters.
+ if 'CPU' not in stats:
+ self._logger.LogError('Missing data for CPU# in Turbostat output.')
+ return {}
+ if 'Bzy_MHz' not in stats:
+ self._logger.LogError('Missing data for Bzy_MHz in Turbostat output.')
+ return {}
+ cpu_index = stats.index('CPU')
+ cpufreq_index = stats.index('Bzy_MHz')
+ cpustats['freq'] = []
+
+ # Optional parameters.
+ cputemp_index = -1
+ if 'CoreTmp' in stats:
+ cputemp_index = stats.index('CoreTmp')
+ cpustats['temp'] = []
+
+ # Parse data starting from the second line ignoring repeating headers.
+ for st in read_data[1:]:
+ # Data are ints or floats separated by spaces.
+ numbers = st.split()
+ if not all(word.replace('.', '', 1).isdigit() for word in numbers[1:]):
+ # Skip the line if the data do not match the expected format.
+ continue
+ if numbers[cpu_index] != '-':
+ # Ignore core-specific statistics, which start with the core number.
+ # Combined statistics for all cores use the "-" CPU identifier.
+ continue
+
+ cpustats['freq'].append(int(numbers[cpufreq_index]))
+ if cputemp_index != -1:
+ cpustats['temp'].append(int(numbers[cputemp_index]))
+ return cpustats
+
def ProcessHistogramsResults(self):
# Open and parse the json results file generated by telemetry/test_that.
if not self.results_file:
@@ -479,6 +549,15 @@ class Result(object):
# Generate report from all perf.data files.
# Now parse all perf report files and include them in keyvals.
self.GatherPerfResults()
+ cpustats = self.ProcessTurbostatResults()
+ if 'freq' in cpustats:
+ self.cpufreq = cpustats['freq']
+ self.keyvals['cpufreq_avg'] = sum(self.cpufreq) / len(self.cpufreq)
+ self.keyvals['cpufreq_min'] = min(self.cpufreq)
+ self.keyvals['cpufreq_max'] = max(self.cpufreq)
+ if 'temp' in cpustats:
+ self.cputemp = cpustats['temp']
+ self.keyvals['cputemp'] = sum(self.cputemp) / len(self.cputemp)
def GetChromeVersionFromCache(self, cache_dir):
# Read chrome_version from keys file, if present.
diff --git a/crosperf/results_cache_unittest.py b/crosperf/results_cache_unittest.py
index 78ea5a6c..fee67491 100755
--- a/crosperf/results_cache_unittest.py
+++ b/crosperf/results_cache_unittest.py
@@ -156,6 +156,41 @@ keyvals = {
'b_string_strstr___abcdefghijklmnopqrstuvwxyz__': '0.0134553343333'
}
+TURBOSTAT_LOG_OUTPUT = \
+"""CPU Avg_MHz Busy% Bzy_MHz TSC_MHz IRQ CoreTmp
+- 329 12.13 2723 2393 10975 77
+0 336 12.41 2715 2393 6328 77
+2 323 11.86 2731 2393 4647 69
+CPU Avg_MHz Busy% Bzy_MHz TSC_MHz IRQ CoreTmp
+- 1940 67.46 2884 2393 39920 83
+0 1827 63.70 2877 2393 21184 83
+2 2053 71.22 2891 2393 18736 67
+CPU Avg_MHz Busy% Bzy_MHz TSC_MHz IRQ CoreTmp
+- 1927 66.02 2927 2393 48946 84
+0 1880 64.47 2925 2393 24457 84
+2 1973 67.57 2928 2393 24489 69
+CPU Avg_MHz Busy% Bzy_MHz TSC_MHz IRQ CoreTmp
+- 1899 64.84 2937 2393 42540 72
+0 2135 72.82 2940 2393 23615 65
+2 1663 56.85 2934 2393 18925 72
+CPU Avg_MHz Busy% Bzy_MHz TSC_MHz IRQ CoreTmp
+- 1908 65.24 2932 2393 43172 75
+0 1876 64.25 2928 2393 20743 75
+2 1939 66.24 2936 2393 22429 69
+CPU Avg_MHz Busy% Bzy_MHz TSC_MHz IRQ CoreTmp
+- 1553 53.12 2933 2393 35488 46
+0 1484 50.80 2929 2393 18246 46
+2 1623 55.44 2936 2393 17242 45
+CPU Avg_MHz Busy% Bzy_MHz TSC_MHz IRQ CoreTmp
+- 843 29.83 2832 2393 28161 47
+0 827 29.35 2826 2393 16093 47
+2 858 30.31 2838 2393 12068 46
+"""
+TURBOSTAT_CPUSTATS = {
+ 'freq': [2723, 2884, 2927, 2937, 2932, 2933, 2832],
+ 'temp': [77, 83, 84, 72, 75, 46, 47]
+}
+
TMP_DIR1 = '/tmp/tmpAbcXyz'
@@ -190,8 +225,10 @@ class ResultTest(unittest.TestCase):
self.callGetNewKeyvals = False
self.callGetResultsFile = False
self.callGetPerfDataFiles = False
+ self.callGetTurbostatFile = False
self.args = None
self.callGatherPerfResults = False
+ self.callProcessTutbostatResults = False
self.mock_logger = mock.Mock(spec=logger.Logger)
self.mock_cmd_exec = mock.Mock(spec=command_executer.CommandExecuter)
self.mock_label = MockLabel('mock_label', 'build', 'chromeos_image',
@@ -503,7 +540,7 @@ class ResultTest(unittest.TestCase):
self.result.results_dir = None
res = self.result.FindFilesInResultsDir('-name perf.data')
- self.assertIsNone(res)
+ self.assertEqual(res, '')
self.result.ce.RunCommand = mock_runcmd
self.result.results_dir = '/tmp/test_results'
@@ -553,6 +590,85 @@ class ResultTest(unittest.TestCase):
self.assertEqual(res, ['line1', 'line1'])
self.assertEqual(self.args, '-name perf_measurements')
+ @mock.patch.object(command_executer.CommandExecuter, 'RunCommandWOutput')
+ def test_get_turbostat_file_finds_single_log(self, mock_runcmd):
+ """Expected behavior when a single log file is found."""
+ self.result.results_dir = '/tmp/test_results'
+ self.result.ce.RunCommandWOutput = mock_runcmd
+ mock_runcmd.return_value = (0, 'some/long/path/turbostat.log', '')
+ found_single_log = self.result.GetTurbostatFile()
+ self.assertEqual(found_single_log, 'some/long/path/turbostat.log')
+
+ @mock.patch.object(command_executer.CommandExecuter, 'RunCommandWOutput')
+ def test_get_turbostat_file_finds_multiple_logs(self, mock_runcmd):
+ """Case when multiple log files are found; the first one is used."""
+ self.result.results_dir = '/tmp/test_results'
+ self.result.ce.RunCommandWOutput = mock_runcmd
+ mock_runcmd.return_value = (0,
+ 'some/long/path/turbostat.log\nturbostat.log',
+ '')
+ found_first_logs = self.result.GetTurbostatFile()
+ self.assertEqual(found_first_logs, 'some/long/path/turbostat.log')
+
+ @mock.patch.object(command_executer.CommandExecuter, 'RunCommandWOutput')
+ def test_get_turbostat_file_finds_no_logs(self, mock_runcmd):
+ """Error case when no log file is found."""
+ self.result.results_dir = '/tmp/test_results'
+ self.result.ce.RunCommandWOutput = mock_runcmd
+ mock_runcmd.return_value = (0, '', '')
+ found_no_logs = self.result.GetTurbostatFile()
+ self.assertEqual(found_no_logs, '')
+
+ @mock.patch.object(command_executer.CommandExecuter, 'RunCommandWOutput')
+ def test_get_turbostat_file_with_failing_find(self, mock_runcmd):
+ """Error case when file search returns an error."""
+ self.result.results_dir = '/tmp/test_results'
+ mock_runcmd.return_value = (-1, '', 'error')
+ with self.assertRaises(RuntimeError):
+ self.result.GetTurbostatFile()
+
+ def test_process_turbostat_results_with_valid_data(self):
+ """Normal case when log exists and contains valid data."""
+ self.result.turbostat_log_file = '/tmp/somelogfile.log'
+ with mock.patch('__builtin__.open',
+ mock.mock_open(read_data=TURBOSTAT_LOG_OUTPUT)) as mo:
+ cpustats = self.result.ProcessTurbostatResults()
+ # Check that the log got opened and data were read/parsed.
+ calls = [mock.call('/tmp/somelogfile.log')]
+ mo.assert_has_calls(calls)
+ self.assertEqual(cpustats, TURBOSTAT_CPUSTATS)
+
+ def test_process_turbostat_results_from_empty_file(self):
+ """Error case when log exists but file is empty."""
+ self.result.turbostat_log_file = '/tmp/emptylogfile.log'
+ with mock.patch('__builtin__.open', mock.mock_open(read_data='')) as mo:
+ cpustats = self.result.ProcessTurbostatResults()
+ # Check that the log got opened and parsed successfully and empty data
+ # returned.
+ calls = [mock.call('/tmp/emptylogfile.log')]
+ mo.assert_has_calls(calls)
+ self.assertEqual(cpustats, {})
+
+ def test_process_turbostat_results_with_no_filename(self):
+ """Error case when no log file name provided."""
+ self.result.turbostat_log_file = ''
+ with mock.patch('__builtin__.open', mock.mock_open()) as mo:
+ cpustats = self.result.ProcessTurbostatResults()
+ # Check no attempt to open a log and empty data returned.
+ mo.assert_not_called()
+ self.assertEqual(cpustats, {})
+
+ def test_process_turbostat_results_when_file_doesnt_exist(self):
+ """Error case when file does not exist."""
+ nonexistinglog = '/tmp/1'
+ while os.path.exists(nonexistinglog):
+ # Extend file path if it happens to exist.
+ nonexistinglog = os.path.join(nonexistinglog, '1')
+ self.result.turbostat_log_file = nonexistinglog
+ # Let the tested function call the real open(), which should raise IOError.
+ with self.assertRaises(IOError):
+ self.result.ProcessTurbostatResults()
+
@mock.patch.object(misc, 'GetInsideChrootPath')
@mock.patch.object(command_executer.CommandExecuter, 'ChrootRunCommand')
def test_generate_perf_report_files(self, mock_chrootruncmd, mock_getpath):
@@ -608,6 +724,10 @@ class ResultTest(unittest.TestCase):
self.callGetPerfReportFiles = True
return []
+ def FakeGetTurbostatFile():
+ self.callGetTurbostatFile = True
+ return []
+
def FakeProcessResults(show_results=False):
if show_results:
pass
@@ -622,12 +742,14 @@ class ResultTest(unittest.TestCase):
self.callGetResultsFile = False
self.callGetPerfDataFiles = False
self.callGetPerfReportFiles = False
+ self.callGetTurbostatFile = False
self.callProcessResults = False
self.result.GetResultsDir = FakeGetResultsDir
self.result.GetResultsFile = FakeGetResultsFile
self.result.GetPerfDataFiles = FakeGetPerfDataFiles
self.result.GeneratePerfReportFiles = FakeGetPerfReportFiles
+ self.result.GetTurbostatFile = FakeGetTurbostatFile
self.result.ProcessResults = FakeProcessResults
self.result.PopulateFromRun(OUTPUT, '', 0, 'test', 'telemetry_Crosperf',
@@ -636,6 +758,7 @@ class ResultTest(unittest.TestCase):
self.assertTrue(self.callGetResultsFile)
self.assertTrue(self.callGetPerfDataFiles)
self.assertTrue(self.callGetPerfReportFiles)
+ self.assertTrue(self.callGetTurbostatFile)
self.assertTrue(self.callProcessResults)
def test_process_results(self):
@@ -652,10 +775,21 @@ class ResultTest(unittest.TestCase):
def FakeGetSamples():
return 1
+ def FakeProcessTurbostatResults():
+ self.callProcessTutbostatResults = True
+ res = {}
+ if self.result.turbostat_log_file:
+ res['freq'] = [1, 2, 3]
+ res['temp'] = [5, 6, 7]
+ return res
+
self.callGatherPerfResults = False
+ self.callProcessTutbostatResults = False
+ self.result.turbostat_log_file = ''
self.result.GetKeyvals = FakeGetKeyvals
self.result.GatherPerfResults = FakeGatherPerfResults
+ self.result.ProcessTurbostatResults = FakeProcessTurbostatResults
self.result.retval = 0
self.result.ProcessResults()
@@ -678,6 +812,22 @@ class ResultTest(unittest.TestCase):
'samples': 1,
'retval': 0
})
+ self.result.cwp_dso = ''
+
+ self.result.retval = 0
+ self.result.turbostat_log_file = '/tmp/turbostat.log'
+ self.result.ProcessResults()
+ self.assertTrue(self.callProcessTutbostatResults)
+ self.assertEqual(len(self.result.keyvals), 6)
+ self.assertEqual(
+ self.result.keyvals, {
+ 'Total': 10,
+ 'cpufreq_avg': 2,
+ 'cpufreq_max': 3,
+ 'cpufreq_min': 1,
+ 'cputemp': 6,
+ 'retval': 0
+ })
@mock.patch.object(misc, 'GetInsideChrootPath')
@mock.patch.object(command_executer.CommandExecuter,
diff --git a/crosperf/results_organizer.py b/crosperf/results_organizer.py
index fb7c60dc..8f183cc9 100644
--- a/crosperf/results_organizer.py
+++ b/crosperf/results_organizer.py
@@ -187,7 +187,9 @@ def OrganizeResults(benchmark_runs, labels, benchmarks=None, json_report=False):
if not show_all_results:
summary_list = summary_file.get(benchmark.name)
if summary_list:
- summary_list.append('retval')
+ summary_list += [
+ 'retval', 'cpufreq_avg', 'cpufreq_min', 'cpufreq_max', 'cputemp'
+ ]
else:
# Did not find test_name in json file; show everything.
show_all_results = True