author    Denis Nikitin <denik@google.com>    2019-09-27 09:04:31 -0700
committer Denis Nikitin <denik@chromium.org>  2019-09-28 04:29:49 +0000
commit    30a061f7921c173e9a25a940319a7f79e41c98ec (patch)
tree      fbf41a5066384a85948831e0168b892d4f927eab
parent    4713fd1d6e502969ab8690adb513d8d00973ac44 (diff)
download  toolchain-utils-30a061f7921c173e9a25a940319a7f79e41c98ec.tar.gz
crosperf: Print top5 commands into log
Enable top in nightly tests and print the top 5 commands with the highest
CPU usage into the log.

BUG=chromium:966514
TEST=unittests and HW tests on eve passed.

Change-Id: I0efd7f0875829c4423e998ec2fbec7436f7ae148
Reviewed-on: https://chromium-review.googlesource.com/c/chromiumos/third_party/toolchain-utils/+/1829309
Tested-by: Denis Nikitin <denik@chromium.org>
Reviewed-by: George Burgess <gbiv@chromium.org>
-rwxr-xr-x  buildbot_test_toolchains.py         |   1
-rw-r--r--  crosperf/benchmark_run.py           |  22
-rwxr-xr-x  crosperf/benchmark_run_unittest.py  | 115
-rw-r--r--  crosperf/results_cache.py           | 145
-rwxr-xr-x  crosperf/results_cache_unittest.py  | 136
5 files changed, 396 insertions(+), 23 deletions(-)
diff --git a/buildbot_test_toolchains.py b/buildbot_test_toolchains.py
index 6a72f3c4..e289b1a5 100755
--- a/buildbot_test_toolchains.py
+++ b/buildbot_test_toolchains.py
@@ -159,6 +159,7 @@ class ToolchainComparator(object):
cooldown_temp: 40
cooldown_time: 10
cpu_freq_pct: 95
+ top_interval: 1
""" % (self._board, self._remotes)
experiment_tests = """
benchmark: all_toolchain_perf {
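For context, a minimal sketch of a crosperf experiment file that enables the
new top collection. Everything except top_interval is illustrative; based on
the template above, top_interval appears to set the period in seconds between
top snapshots:

  # experiment.exp (hypothetical)
  board: eve
  remote: <DUT address>
  top_interval: 1   # take a top snapshot every second

  benchmark: all_toolchain_perf {
    suite: telemetry_Crosperf
    iterations: 1
  }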
diff --git a/crosperf/benchmark_run.py b/crosperf/benchmark_run.py
index fd2f857b..0147599d 100644
--- a/crosperf/benchmark_run.py
+++ b/crosperf/benchmark_run.py
@@ -91,6 +91,25 @@ class BenchmarkRun(threading.Thread):
self.cache_hit = (self.result is not None)
self.cache_has_been_read = True
+ def PrintTop5Cmds(self, topcmds):
+ """Print top 5 commands into log."""
+
+ self._logger.LogOutput('%s' % str(self))
+ self._logger.LogOutput('Top 5 commands with highest CPU usage:')
+ # Header.
+ print_line = '%20s %9s %6s %s' % ('COMMAND', 'AVG CPU%', 'COUNT',
+ 'HIGHEST 5')
+ self._logger.LogOutput(print_line)
+ self._logger.LogOutput('-' * 50)
+ if topcmds:
+ for topcmd in topcmds[:5]:
+ print_line = '%20s %9.2f %6s %s' % (topcmd['cmd'], topcmd['cpu_avg'],
+ topcmd['count'], topcmd['top5'])
+ self._logger.LogOutput(print_line)
+ else:
+ self._logger.LogOutput('[NO DATA FROM THE TOP LOG]')
+ self._logger.LogOutput('-' * 50)
+
def run(self):
try:
if not self.cache_has_been_read:
@@ -118,6 +137,9 @@ class BenchmarkRun(threading.Thread):
self.machine = self.AcquireMachine()
self.cache.machine = self.machine
self.result = self.RunTest(self.machine)
+ # TODO(denik): Add the Top5 report to the HTML output.
+ if self.result:
+ self.PrintTop5Cmds(self.result.GetTopCmds())
self.cache.remote = self.machine.name
self.label.chrome_version = self.machine_manager.GetChromeVersion(
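GetTopCmds() returns the list that PrintTop5Cmds() consumes. Below is a
standalone sketch of that data shape, with hypothetical values; only the
first five entries are printed:

  # Hypothetical return value of Result.GetTopCmds():
  top_cmds = [
      {
          'cmd': 'chrome',       # command name
          'cpu_avg': 124.75,     # average CPU% over active snapshots
          'count': 4,            # number of occurrences observed
          'top5': [122.8, 107.9, 17.8, 1.0],  # up to 5 highest CPU% values
      },
      # ... more commands, in descending order of total CPU usage
  ]
  # benchmark_run then logs a header and the first five entries:
  # self.PrintTop5Cmds(self.result.GetTopCmds())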
diff --git a/crosperf/benchmark_run_unittest.py b/crosperf/benchmark_run_unittest.py
index e5c77113..98bb96cd 100755
--- a/crosperf/benchmark_run_unittest.py
+++ b/crosperf/benchmark_run_unittest.py
@@ -140,27 +140,27 @@ class BenchmarkRunTest(unittest.TestCase):
'average', '', {})
def MockLogOutput(msg, print_to_console=False):
- 'Helper function for test_run.'
+ """Helper function for test_run."""
del print_to_console
self.log_output.append(msg)
def MockLogError(msg, print_to_console=False):
- 'Helper function for test_run.'
+ """Helper function for test_run."""
del print_to_console
self.log_error.append(msg)
def MockRecordStatus(msg):
- 'Helper function for test_run.'
+ """Helper function for test_run."""
self.status.append(msg)
def FakeReadCache():
- 'Helper function for test_run.'
+ """Helper function for test_run."""
br.cache = mock.Mock(spec=ResultsCache)
self.called_ReadCache = True
return 0
def FakeReadCacheSucceed():
- 'Helper function for test_run.'
+ """Helper function for test_run."""
br.cache = mock.Mock(spec=ResultsCache)
br.result = mock.Mock(spec=Result)
br.result.out = 'result.out stuff'
@@ -170,29 +170,29 @@ class BenchmarkRunTest(unittest.TestCase):
return 0
def FakeReadCacheException():
- 'Helper function for test_run.'
+ """Helper function for test_run."""
raise RuntimeError('This is an exception test; it is supposed to happen')
def FakeAcquireMachine():
- 'Helper function for test_run.'
+ """Helper function for test_run."""
mock_machine = MockCrosMachine('chromeos1-row3-rack5-host7.cros',
'chromeos', 'average')
return mock_machine
def FakeRunTest(_machine):
- 'Helper function for test_run.'
+ """Helper function for test_run."""
mock_result = mock.Mock(spec=Result)
mock_result.retval = 0
return mock_result
def FakeRunTestFail(_machine):
- 'Helper function for test_run.'
+ """Helper function for test_run."""
mock_result = mock.Mock(spec=Result)
mock_result.retval = 1
return mock_result
def ResetTestValues():
- 'Helper function for test_run.'
+ """Helper function for test_run."""
self.log_output = []
self.log_error = []
self.status = []
@@ -206,6 +206,7 @@ class BenchmarkRunTest(unittest.TestCase):
br.ReadCache = FakeReadCache
br.RunTest = FakeRunTest
br.AcquireMachine = FakeAcquireMachine
+ br.PrintTop5Cmds = mock.Mock()
# First test: No cache hit, all goes well.
ResetTestValues()
@@ -218,6 +219,7 @@ class BenchmarkRunTest(unittest.TestCase):
])
self.assertEqual(len(self.log_error), 0)
self.assertEqual(self.status, ['WAITING', 'SUCCEEDED'])
+ br.PrintTop5Cmds.assert_called_once()
# Second test: No cached result found; test run was "terminated" for some
# reason.
@@ -280,11 +282,11 @@ class BenchmarkRunTest(unittest.TestCase):
'average', '', {})
def GetLastEventPassed():
- 'Helper function for test_terminate_pass'
+ """Helper function for test_terminate_pass"""
return benchmark_run.STATUS_SUCCEEDED
def RecordStub(status):
- 'Helper function for test_terminate_pass'
+ """Helper function for test_terminate_pass"""
self.status = status
self.status = benchmark_run.STATUS_SUCCEEDED
@@ -307,11 +309,11 @@ class BenchmarkRunTest(unittest.TestCase):
'average', '', {})
def GetLastEventFailed():
- 'Helper function for test_terminate_fail'
+ """Helper function for test_terminate_fail"""
return benchmark_run.STATUS_FAILED
def RecordStub(status):
- 'Helper function for test_terminate_fail'
+ """Helper function for test_terminate_fail"""
self.status = status
self.status = benchmark_run.STATUS_SUCCEEDED
@@ -351,7 +353,7 @@ class BenchmarkRunTest(unittest.TestCase):
'average', '', {})
def MockLogError(err_msg):
- 'Helper function for test_get_extra_autotest_args'
+ """Helper function for test_get_extra_autotest_args"""
self.err_msg = err_msg
self.mock_logger.LogError = MockLogError
@@ -434,6 +436,89 @@ class BenchmarkRunTest(unittest.TestCase):
br.SetCacheConditions(self.test_cache_conditions)
self.assertEqual(br.cache_conditions, self.test_cache_conditions)
+ def test_print_top5_cmds(self):
+ """Test print of top5 commands."""
+ topcmds = [
+ {
+ 'cmd': 'chrome',
+ 'cpu_avg': 119.753453465,
+ 'count': 4,
+ 'top5': [122.8, 107.9, 17.8, 1.0],
+ },
+ {
+ 'cmd': 'irq/230-cros-ec',
+ 'cpu_avg': 10.000000000000001,
+ 'count': 1000,
+ 'top5': [0.5, 0.4, 0.3, 0.2, 0.1],
+ },
+ {
+ 'cmd': 'powerd',
+ 'cpu_avg': 2.0,
+ 'count': 2,
+ 'top5': [3.0, 1.0]
+ },
+ {
+ 'cmd': 'cmd1',
+ 'cpu_avg': 1.0,
+ 'count': 1,
+ 'top5': [1.0],
+ },
+ {
+ 'cmd': 'cmd2',
+ 'cpu_avg': 1.0,
+ 'count': 1,
+ 'top5': [1.0],
+ },
+ {
+ 'cmd': 'not_for_print',
+ 'cpu_avg': 1.0,
+ 'count': 1,
+ 'top5': [1.0],
+ },
+ ]
+ mock_logger = mock.Mock()
+ br = benchmark_run.BenchmarkRun(
+ 'test_run', self.test_benchmark, self.test_label, 1,
+ self.test_cache_conditions, self.mock_machine_manager, mock_logger,
+ 'average', '', {})
+ br.PrintTop5Cmds(topcmds)
+ # pylint: disable=line-too-long
+ self.assertEqual(mock_logger.LogOutput.call_args_list, [
+ mock.call('BenchmarkRun[name="test_run"]'),
+ mock.call('Top 5 commands with highest CPU usage:'),
+ mock.call(' COMMAND AVG CPU% COUNT HIGHEST 5'),
+ mock.call('-' * 50),
+ mock.call(
+ ' chrome 119.75 4 [122.8, 107.9, 17.8, 1.0]'
+ ),
+ mock.call(
+ ' irq/230-cros-ec 10.00 1000 [0.5, 0.4, 0.3, 0.2, 0.1]'
+ ),
+ mock.call(' powerd 2.00 2 [3.0, 1.0]'),
+ mock.call(' cmd1 1.00 1 [1.0]'),
+ mock.call(' cmd2 1.00 1 [1.0]'),
+ mock.call('-' * 50),
+ ])
+ # pylint: enable=line-too-long
+
+ def test_print_top5_calls_no_data(self):
+ """Test print of top5 with no data."""
+ topcmds = []
+ mock_logger = mock.Mock()
+ br = benchmark_run.BenchmarkRun(
+ 'test_run', self.test_benchmark, self.test_label, 1,
+ self.test_cache_conditions, self.mock_machine_manager, mock_logger,
+ 'average', '', {})
+ br.PrintTop5Cmds(topcmds)
+ self.assertEqual(mock_logger.LogOutput.call_args_list, [
+ mock.call('BenchmarkRun[name="test_run"]'),
+ mock.call('Top 5 commands with highest CPU usage:'),
+ mock.call(' COMMAND AVG CPU% COUNT HIGHEST 5'),
+ mock.call('-' * 50),
+ mock.call('[NO DATA FROM THE TOP LOG]'),
+ mock.call('-' * 50),
+ ])
+
if __name__ == '__main__':
unittest.main()
diff --git a/crosperf/results_cache.py b/crosperf/results_cache.py
index 274e198a..135c7687 100644
--- a/crosperf/results_cache.py
+++ b/crosperf/results_cache.py
@@ -10,6 +10,7 @@ from __future__ import print_function
import glob
import hashlib
+import heapq
import json
import os
import pickle
@@ -55,6 +56,7 @@ class Result(object):
self.results_file = []
self.turbostat_log_file = ''
self.cpustats_log_file = ''
+ self.top_log_file = ''
self.chrome_version = ''
self.err = None
self.chroot_results_dir = ''
@@ -65,6 +67,11 @@ class Result(object):
self.cwp_dso = ''
self.retval = None
self.out = None
+ self.top_cmds = []
+
+ def GetTopCmds(self):
+ """Get the list of top commands consuming CPU on the machine."""
+ return self.top_cmds
def CopyFilesTo(self, dest_dir, files_to_copy):
file_index = 0
@@ -283,6 +290,10 @@ class Result(object):
"""Get cpustats log path string."""
return self.FindFilesInResultsDir('-name cpustats.log').split('\n')[0]
+ def GetTopFile(self):
+ """Get cpustats log path string."""
+ return self.FindFilesInResultsDir('-name top.log').split('\n')[0]
+
def _CheckDebugPath(self, option, path):
relative_path = path[1:]
out_chroot_path = os.path.join(self.chromeos_root, 'chroot', relative_path)
@@ -381,6 +392,7 @@ class Result(object):
self.perf_report_files = self.GeneratePerfReportFiles()
self.turbostat_log_file = self.GetTurbostatFile()
self.cpustats_log_file = self.GetCpustatsFile()
+ self.top_log_file = self.GetTopFile()
# TODO(asharif): Do something similar with perf stat.
# Grab keyvals from the directory.
@@ -447,7 +459,7 @@ class Result(object):
read_data = f.readlines()
if not read_data:
- self._logger.LogError('Turbostat output file is empty.')
+ self._logger.LogOutput('WARNING: Turbostat output file is empty.')
return {}
# First line always contains the header.
@@ -455,10 +467,12 @@ class Result(object):
# Mandatory parameters.
if 'CPU' not in stats:
- self._logger.LogError('Missing data for CPU# in Turbostat output.')
+ self._logger.LogOutput(
+ 'WARNING: Missing data for CPU# in Turbostat output.')
return {}
if 'Bzy_MHz' not in stats:
- self._logger.LogError('Missing data for Bzy_MHz in Turbostat output.')
+ self._logger.LogOutput(
+ 'WARNING: Missing data for Bzy_MHz in Turbostat output.')
return {}
cpu_index = stats.index('CPU')
cpufreq_index = stats.index('Bzy_MHz')
@@ -487,6 +501,123 @@ class Result(object):
cputemp['all'].append(int(numbers[cputemp_index]))
return cpustats
+ def ProcessTopResults(self):
+ """Given self.top_log_file process top log data.
+
+ Returns:
+ List of dictionaries with the following keyvals:
+ 'cmd': command name (string),
+ 'cpu_avg': average cpu usage (float),
+ 'count': number of occurrences (int),
+ 'top5': up to 5 highest cpu usages (descending list of floats)
+
+ Example of the top log:
+ PID USER PR NI VIRT RES SHR S %CPU %MEM TIME+ COMMAND
+ 4102 chronos 12 -8 3454472 238300 118188 R 41.8 6.1 0:08.37 chrome
+ 375 root 0 -20 0 0 0 S 5.9 0.0 0:00.17 kworker
+ 617 syslog 20 0 25332 8372 7888 S 5.9 0.2 0:00.77 systemd
+
+ PID USER PR NI VIRT RES SHR S %CPU %MEM TIME+ COMMAND
+ 5745 chronos 20 0 5438580 139328 67988 R 122.8 3.6 0:04.26 chrome
+ 912 root -51 0 0 0 0 S 2.0 0.0 0:01.04 irq/cro
+ 121 root 20 0 0 0 0 S 1.0 0.0 0:00.45 spi5
+ """
+ all_data = ''
+ with open(self.top_log_file) as f:
+ all_data = f.read()
+
+ if not all_data:
+ self._logger.LogOutput('WARNING: Top log file is empty.')
+ return []
+
+ top_line_regex = re.compile(
+ r"""
+ ^\s*(?P<pid>\d+)\s+ # Group 1: PID
+ \S+\s+\S+\s+-?\d+\s+ # Ignore: user, prio, nice
+ \d+\s+\d+\s+\d+\s+ # Ignore: virt/res/shared mem
+ \S+\s+ # Ignore: state
+ (?P<cpu_use>\d+\.\d+)\s+ # Group 2: CPU usage
+ \d+\.\d+\s+\d+:\d+\.\d+\s+ # Ignore: mem usage, time
+ (?P<cmd>\S+)$ # Group 3: command
+ """, re.VERBOSE)
+ # A page represents top log data from one measurement taken within the
+ # time interval 'top_interval'.
+ # Pages are separated by an empty line.
+ pages = all_data.split('\n\n')
+ # Snapshots are a structured representation of the pages.
+ snapshots = []
+ for page in pages:
+ if not page:
+ continue
+
+ # The snapshot list will contain all processes (command duplicates are
+ # allowed).
+ snapshot = []
+ for line in page.splitlines():
+ match = top_line_regex.match(line)
+ if match:
+ # Top line is valid, collect data.
+ process = {
+ # NOTE: One command may be represented by multiple processes.
+ 'cmd': match.group('cmd'),
+ 'pid': int(match.group('pid')),
+ 'cpu_use': float(match.group('cpu_use')),
+ }
+
+ # Filter out processes with 0 CPU usage and the top command itself.
+ if process['cpu_use'] > 0 and process['cmd'] != 'top':
+ snapshot.append(process)
+
+ # If the page contained meaningful data, add the snapshot to the list.
+ if snapshot:
+ snapshots.append(snapshot)
+
+ # Threshold of CPU usage when Chrome is busy, i.e. benchmark is running.
+ # FIXME(denik): 70 is just a guess and needs empirical evidence.
+ # (It does not need to be configurable.)
+ chrome_high_cpu_load = 70
+ # Number of snapshots where chrome is heavily used.
+ active_snapshots = 0
+ # Total CPU use per command across ALL active snapshots.
+ cmd_total_cpu_use = {}
+ # Top CPU usages per command.
+ cmd_top5_cpu_use = {}
+ # List of Top Commands to be returned.
+ topcmds = []
+
+ for snapshot_processes in snapshots:
+ if any(chrome_proc['cpu_use'] > chrome_high_cpu_load
+ for chrome_proc in snapshot_processes
+ if chrome_proc['cmd'] == 'chrome'):
+ # This is a snapshot where at least one chrome process
+ # has CPU usage above the threshold.
+ active_snapshots += 1
+ for process in snapshot_processes:
+ cmd = process['cmd']
+ cpu_use = process['cpu_use']
+
+ # Update total CPU usage.
+ total_cpu_use = cmd_total_cpu_use.setdefault(cmd, 0.0)
+ cmd_total_cpu_use[cmd] = total_cpu_use + cpu_use
+
+ # Push cpu_use onto the per-command heap; the five highest values
+ # are extracted later with heapq.nlargest.
+ top5_list = cmd_top5_cpu_use.setdefault(cmd, [])
+ heapq.heappush(top5_list, cpu_use)
+
+ for consumer, usage in sorted(
+ cmd_total_cpu_use.items(), key=lambda x: x[1], reverse=True):
+ # Iterate through commands in descending order of total CPU usage.
+ topcmd = {
+ 'cmd': consumer,
+ 'cpu_avg': usage / active_snapshots,
+ 'count': len(cmd_top5_cpu_use[consumer]),
+ 'top5': heapq.nlargest(5, cmd_top5_cpu_use[consumer]),
+ }
+ topcmds.append(topcmd)
+
+ return topcmds
+
def ProcessCpustatsResults(self):
"""Given cpustats_log_file non-null parse cpu data from file.
@@ -513,7 +644,7 @@ class Result(object):
read_data = f.readlines()
if not read_data:
- self._logger.LogError('Cpustats output file is empty.')
+ self._logger.LogOutput('WARNING: Cpustats output file is empty.')
return {}
cpufreq_regex = re.compile(r'^[/\S]+/(cpu\d+)/[/\S]+\s+(\d+)$')
@@ -583,8 +714,8 @@ class Result(object):
vals = obj['sampleValues']
if isinstance(vals, list):
# Remove None elements from the list
- vals = list(filter(None, vals))
- if len(vals):
+ vals = [val for val in vals if val is not None]
+ if vals:
result = float(sum(vals)) / len(vals)
else:
result = 0
@@ -654,6 +785,8 @@ class Result(object):
# Process cpustats output only if turbostat has no data.
if not cpustats and self.cpustats_log_file:
cpustats = self.ProcessCpustatsResults()
+ if self.top_log_file:
+ self.top_cmds = self.ProcessTopResults()
for param_key, param in cpustats.items():
for param_type, param_values in param.items():
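The core of ProcessTopResults is a per-line regex plus per-command heap
bookkeeping. A self-contained sketch of both pieces (the sample line and
values are illustrative):

  import heapq
  import re

  # Same shape as the top_line_regex above.
  TOP_LINE_REGEX = re.compile(
      r'^\s*(?P<pid>\d+)\s+'           # PID
      r'\S+\s+\S+\s+-?\d+\s+'          # user, priority, nice
      r'\d+\s+\d+\s+\d+\s+'            # virt/res/shared memory
      r'\S+\s+'                        # state
      r'(?P<cpu_use>\d+\.\d+)\s+'      # CPU usage
      r'\d+\.\d+\s+\d+:\d+\.\d+\s+'    # memory usage, time
      r'(?P<cmd>\S+)$')                # command

  line = (' 5745 chronos   20   0 5438580 139328  67988 R '
          '122.8  3.6   0:04.26 chrome')
  m = TOP_LINE_REGEX.match(line)
  assert m.group('cmd') == 'chrome'
  assert float(m.group('cpu_use')) == 122.8

  # Heap bookkeeping: push every sample, extract the 5 largest at the end.
  cmd_samples = {'chrome': [41.8, 122.8, 107.9, 17.8, 1.0, 5.0]}
  top5 = {}
  for cmd, cpu_uses in cmd_samples.items():
      heap = []
      for cpu in cpu_uses:
          heapq.heappush(heap, cpu)
      top5[cmd] = heapq.nlargest(5, heap)
  print(top5)  # {'chrome': [122.8, 107.9, 41.8, 17.8, 5.0]}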
diff --git a/crosperf/results_cache_unittest.py b/crosperf/results_cache_unittest.py
index c1e151b7..28ce599c 100755
--- a/crosperf/results_cache_unittest.py
+++ b/crosperf/results_cache_unittest.py
@@ -13,7 +13,6 @@ import os
import shutil
import tempfile
import unittest
-import mock
import mock
@@ -30,6 +29,7 @@ from cros_utils import command_executer
from cros_utils import logger
from cros_utils import misc
+# pylint: disable=line-too-long
OUTPUT = """CMD (True): ./test_that.sh\
--remote=172.17.128.241 --board=lumpy LibCBench
CMD (None): cd /usr/local/google/home/yunlian/gd/src/build/images/lumpy/latest/../../../../..; cros_sdk -- ./in_chroot_cmd6X7Cxu.sh
@@ -198,6 +198,79 @@ TURBOSTAT_DATA = {
},
}
+TOP_LOG = \
+"""
+ PID USER PR NI VIRT RES SHR S %CPU %MEM TIME+ COMMAND
+ 4102 chronos 12 -8 3454472 238300 118188 R 41.8 6.1 0:08.37 chrome
+ 4204 chronos 12 -8 2492716 205728 179016 S 11.8 5.3 0:03.89 chrome
+ 4890 root 20 0 3396 2064 1596 R 11.8 0.1 0:00.03 top
+ 375 root 0 -20 0 0 0 S 5.9 0.0 0:00.17 kworker/u13
+ 617 syslog 20 0 25332 8372 7888 S 5.9 0.2 0:00.77 sys-journal
+
+ PID USER PR NI VIRT RES SHR S %CPU %MEM TIME+ COMMAND
+ 5745 chronos 20 0 5438580 139328 67988 R 122.8 3.6 0:04.26 chrome
+ 912 root -51 0 0 0 0 S 2.0 0.0 0:01.04 irq/cros-ec
+ 121 root 20 0 0 0 0 S 1.0 0.0 0:00.45 spi5
+ 4811 root 20 0 6808 4084 3492 S 1.0 0.1 0:00.02 sshd
+ 4890 root 20 0 3364 2148 1596 R 1.0 0.1 0:00.36 top
+ 5205 chronos 12 -8 3673780 240928 130864 S 1.0 6.2 0:07.30 chrome
+
+
+ PID USER PR NI VIRT RES SHR S %CPU %MEM TIME+ COMMAND
+ 5745 chronos 20 0 5434484 139432 63892 R 107.9 3.6 0:05.35 chrome
+ 5713 chronos 20 0 5178652 103120 50372 S 17.8 2.6 0:01.13 chrome
+ 7 root 20 0 0 0 0 S 1.0 0.0 0:00.73 rcu_preempt
+ 855 root 20 0 0 0 0 S 1.0 0.0 0:00.01 kworker/4:2
+"""
+TOP_DATA = [
+ {
+ 'cmd': 'chrome',
+ 'cpu_avg': 124.75,
+ 'count': 4,
+ 'top5': [122.8, 107.9, 17.8, 1.0],
+ },
+ {
+ 'cmd': 'irq/cros-ec',
+ 'cpu_avg': 1.0,
+ 'count': 1,
+ 'top5': [2.0],
+ },
+ {
+ 'cmd': 'sshd',
+ 'cpu_avg': 0.5,
+ 'count': 1,
+ 'top5': [1.0],
+ },
+ {
+ 'cmd': 'spi5',
+ 'cpu_avg': 0.5,
+ 'count': 1,
+ 'top5': [1.0],
+ },
+ {
+ 'cmd': 'rcu_preempt',
+ 'cpu_avg': 0.5,
+ 'count': 1,
+ 'top5': [1.0],
+ },
+ {
+ 'cmd': 'kworker/4:2',
+ 'cpu_avg': 0.5,
+ 'count': 1,
+ 'top5': [1.0],
+ },
+]
+TOP_OUTPUT = \
+""" COMMAND AVG CPU% SEEN HIGHEST 5
+ chrome 128.250000 6 [122.8, 107.9, 17.8, 5.0, 2.0]
+ irq/230-cros-ec 1.000000 1 [2.0]
+ sshd 0.500000 1 [1.0]
+ irq/231-cros-ec 0.500000 1 [1.0]
+ spi5 0.500000 1 [1.0]
+ rcu_preempt 0.500000 1 [1.0]
+ kworker/4:2 0.500000 1 [1.0]
+"""
+
CPUSTATS_UNIQ_OUTPUT = \
"""
/sys/devices/system/cpu/cpu0/cpufreq/cpuinfo_cur_freq 1512000
@@ -324,6 +397,8 @@ HISTOGRAMSET = \
]
"""
+# pylint: enable=line-too-long
+
class MockResult(Result):
"""Mock result class."""
@@ -358,6 +433,7 @@ class ResultTest(unittest.TestCase):
self.callGetPerfDataFiles = False
self.callGetTurbostatFile = False
self.callGetCpustatsFile = False
+ self.callGetTopFile = False
self.args = None
self.callGatherPerfResults = False
self.mock_logger = mock.Mock(spec=logger.Logger)
@@ -759,6 +835,33 @@ class ResultTest(unittest.TestCase):
self.result.GetTurbostatFile()
@mock.patch.object(command_executer.CommandExecuter, 'RunCommandWOutput')
+ def test_get_top_file_finds_single_log(self, mock_runcmd):
+ """Expected behavior when a single top log file found."""
+ self.result.results_dir = '/tmp/test_results'
+ self.result.ce.RunCommandWOutput = mock_runcmd
+ mock_runcmd.return_value = (0, 'some/long/path/top.log', '')
+ found_single_log = self.result.GetTopFile()
+ self.assertEqual(found_single_log, 'some/long/path/top.log')
+
+ @mock.patch.object(command_executer.CommandExecuter, 'RunCommandWOutput')
+ def test_get_top_file_finds_multiple_logs(self, mock_runcmd):
+ """The case when multiple top files found."""
+ self.result.results_dir = '/tmp/test_results'
+ self.result.ce.RunCommandWOutput = mock_runcmd
+ mock_runcmd.return_value = (0, 'some/long/path/top.log\ntop.log', '')
+ found_first_logs = self.result.GetTopFile()
+ self.assertEqual(found_first_logs, 'some/long/path/top.log')
+
+ @mock.patch.object(command_executer.CommandExecuter, 'RunCommandWOutput')
+ def test_get_top_file_finds_no_logs(self, mock_runcmd):
+ """Error case when no log file found."""
+ self.result.results_dir = '/tmp/test_results'
+ self.result.ce.RunCommandWOutput = mock_runcmd
+ mock_runcmd.return_value = (0, '', '')
+ found_no_logs = self.result.GetTopFile()
+ self.assertEqual(found_no_logs, '')
+
+ @mock.patch.object(command_executer.CommandExecuter, 'RunCommandWOutput')
def test_get_cpustats_file_finds_single_log(self, mock_runcmd):
"""Expected behavior when a single log file found."""
self.result.results_dir = '/tmp/test_results'
@@ -827,7 +930,6 @@ class ResultTest(unittest.TestCase):
Expecting that data for all cores will be present in
returned cpustats.
"""
- self.maxDiff = None
self.result.cpustats_log_file = '/tmp/somelogfile.log'
with mock.patch('__builtin__.open',
mock.mock_open(read_data=CPUSTATS_UNIQ_OUTPUT)) as mo:
@@ -865,6 +967,29 @@ class ResultTest(unittest.TestCase):
mo.assert_has_calls(calls)
self.assertEqual(cpustats, {})
+ def test_process_top_results_with_valid_data(self):
+ """Process top log with valid data."""
+
+ self.result.top_log_file = '/tmp/fakelogfile.log'
+ with mock.patch('__builtin__.open',
+ mock.mock_open(read_data=TOP_LOG)) as mo:
+ topproc = self.result.ProcessTopResults()
+ # Check that the log got opened and data were read/parsed.
+ calls = [mock.call('/tmp/fakelogfile.log')]
+ mo.assert_has_calls(calls)
+ self.assertEqual(topproc, TOP_DATA)
+
+ def test_process_top_results_from_empty_file(self):
+ """Error case when log exists but file is empty."""
+ self.result.top_log_file = '/tmp/emptylogfile.log'
+ with mock.patch('__builtin__.open', mock.mock_open(read_data='')) as mo:
+ topcalls = self.result.ProcessTopResults()
+ # Check that the log got opened and parsed successfully and empty data
+ # returned.
+ calls = [mock.call('/tmp/emptylogfile.log')]
+ mo.assert_has_calls(calls)
+ self.assertEqual(topcalls, [])
+
@mock.patch.object(misc, 'GetInsideChrootPath')
@mock.patch.object(command_executer.CommandExecuter, 'ChrootRunCommand')
def test_generate_perf_report_files(self, mock_chrootruncmd, mock_getpath):
@@ -928,6 +1053,10 @@ class ResultTest(unittest.TestCase):
self.callGetCpustatsFile = True
return []
+ def FakeGetTopFile():
+ self.callGetTopFile = True
+ return []
+
def FakeProcessResults(show_results=False):
if show_results:
pass
@@ -944,6 +1073,7 @@ class ResultTest(unittest.TestCase):
self.callGetPerfReportFiles = False
self.callGetTurbostatFile = False
self.callGetCpustatsFile = False
+ self.callGetTopFile = False
self.callProcessResults = False
self.result.GetResultsDir = FakeGetResultsDir
@@ -952,6 +1082,7 @@ class ResultTest(unittest.TestCase):
self.result.GeneratePerfReportFiles = FakeGetPerfReportFiles
self.result.GetTurbostatFile = FakeGetTurbostatFile
self.result.GetCpustatsFile = FakeGetCpustatsFile
+ self.result.GetTopFile = FakeGetTopFile
self.result.ProcessResults = FakeProcessResults
self.result.PopulateFromRun(OUTPUT, '', 0, 'test', 'telemetry_Crosperf',
@@ -962,6 +1093,7 @@ class ResultTest(unittest.TestCase):
self.assertTrue(self.callGetPerfReportFiles)
self.assertTrue(self.callGetTurbostatFile)
self.assertTrue(self.callGetCpustatsFile)
+ self.assertTrue(self.callGetTopFile)
self.assertTrue(self.callProcessResults)
def FakeGetKeyvals(self, show_all=False):
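The tests above patch '__builtin__.open', the Python 2 name for the built-in
(on Python 3 the target would be 'builtins.open'). A minimal sketch of that
mock_open pattern in isolation:

  import mock  # Python 3: from unittest import mock

  FAKE_LOG = 'line one\nline two\n'

  # Redirect the built-in open so the code under test reads FAKE_LOG
  # instead of touching the filesystem. The patch target is
  # '__builtin__.open' on Python 2 and 'builtins.open' on Python 3.
  with mock.patch('__builtin__.open',
                  mock.mock_open(read_data=FAKE_LOG)) as mo:
      with open('/tmp/fakelogfile.log') as f:
          data = f.read()
  mo.assert_called_with('/tmp/fakelogfile.log')
  assert data == FAKE_LOG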