aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--crosperf/experiment_runner.py6
-rwxr-xr-xcrosperf/experiment_runner_unittest.py18
-rw-r--r--crosperf/results_cache.py73
-rwxr-xr-xcrosperf/results_cache_unittest.py113
4 files changed, 129 insertions, 81 deletions
diff --git a/crosperf/experiment_runner.py b/crosperf/experiment_runner.py
index e2bd50db..21fa3ea0 100644
--- a/crosperf/experiment_runner.py
+++ b/crosperf/experiment_runner.py
@@ -275,8 +275,8 @@ class ExperimentRunner(object):
all_failed = True
topstats_file = os.path.join(results_directory, 'topstats.log')
- self.l.LogOutput('Storing top5 statistics of each benchmark run into %s.' %
- topstats_file)
+ self.l.LogOutput(
+ 'Storing top statistics of each benchmark run into %s.' % topstats_file)
with open(topstats_file, 'w') as top_fd:
for benchmark_run in experiment.benchmark_runs:
if benchmark_run.result:
@@ -291,7 +291,7 @@ class ExperimentRunner(object):
# Header with benchmark run name.
top_fd.write('%s\n' % str(benchmark_run))
# Formatted string with top statistics.
- top_fd.write(benchmark_run.result.FormatStringTop5())
+ top_fd.write(benchmark_run.result.FormatStringTopCommands())
top_fd.write('\n\n')
if all_failed:
diff --git a/crosperf/experiment_runner_unittest.py b/crosperf/experiment_runner_unittest.py
index 0bbab292..31d02e71 100755
--- a/crosperf/experiment_runner_unittest.py
+++ b/crosperf/experiment_runner_unittest.py
@@ -409,11 +409,11 @@ class ExperimentRunnerTest(unittest.TestCase):
@mock.patch.object(Result, 'CompressResultsTo')
@mock.patch.object(Result, 'CopyResultsTo')
@mock.patch.object(Result, 'CleanUp')
- @mock.patch.object(Result, 'FormatStringTop5')
+ @mock.patch.object(Result, 'FormatStringTopCommands')
@mock.patch('builtins.open', new_callable=mock.mock_open)
- def test_store_results(self, mock_open, mock_top5, mock_cleanup, mock_copy,
- mock_compress, _mock_text_report, mock_report,
- mock_writefile, mock_mkdir, mock_rmdir):
+ def test_store_results(self, mock_open, mock_top_commands, mock_cleanup,
+ mock_copy, mock_compress, _mock_text_report,
+ mock_report, mock_writefile, mock_mkdir, mock_rmdir):
self.mock_logger.Reset()
self.exp.results_directory = '/usr/local/crosperf-results'
@@ -441,7 +441,7 @@ class ExperimentRunnerTest(unittest.TestCase):
self.assertEqual(mock_rmdir.call_count, 0)
self.assertEqual(self.mock_logger.LogOutputCount, 0)
self.assertEqual(mock_open.call_count, 0)
- self.assertEqual(mock_top5.call_count, 0)
+ self.assertEqual(mock_top_commands.call_count, 0)
# Test 2. _terminated is false; everything works properly.
fake_result = Result(self.mock_logger, self.exp.labels[0], 'average',
@@ -470,7 +470,7 @@ class ExperimentRunnerTest(unittest.TestCase):
self.assertEqual(self.mock_logger.LogOutputCount, 5)
self.assertEqual(self.mock_logger.output_msgs, [
'Storing experiment file in /usr/local/crosperf-results.',
- 'Storing top5 statistics of each benchmark run into'
+ 'Storing top statistics of each benchmark run into'
' /usr/local/crosperf-results/topstats.log.',
'Storing results of each benchmark run.',
'Storing results report in /usr/local/crosperf-results.',
@@ -482,9 +482,9 @@ class ExperimentRunnerTest(unittest.TestCase):
'w')
mock_open().write.assert_called()
- # Check top5 calls with no arguments.
- top5calls = [mock.call()] * 6
- self.assertEqual(mock_top5.call_args_list, top5calls)
+ # Check top calls with no arguments.
+ topcalls = [mock.call()] * 6
+ self.assertEqual(mock_top_commands.call_args_list, topcalls)
# Test 3. Test compress_results.
self.exp.compress_results = True
diff --git a/crosperf/results_cache.py b/crosperf/results_cache.py
index b6050b61..f7b78e39 100644
--- a/crosperf/results_cache.py
+++ b/crosperf/results_cache.py
@@ -76,22 +76,34 @@ class Result(object):
"""Get the list of top commands consuming CPU on the machine."""
return self.top_cmds
- def FormatStringTop5(self):
- """Get formatted top5 string.
+ def FormatStringTopCommands(self):
+ """Get formatted string of top commands.
- Get the formatted string with top5 commands consuming CPU on DUT machine.
+ Get the formatted string with top commands consuming CPU on DUT machine.
+ Number of "non-chrome" processes in the list is limited to 5.
"""
format_list = [
- 'Top 5 commands with highest CPU usage:',
+ 'Top commands with highest CPU usage:',
# Header.
'%20s %9s %6s %s' % ('COMMAND', 'AVG CPU%', 'COUNT', 'HIGHEST 5'),
'-' * 50,
]
if self.top_cmds:
- for topcmd in self.top_cmds[:5]:
- print_line = '%20s %9.2f %6s %s' % (topcmd['cmd'], topcmd['cpu_avg'],
- topcmd['count'], topcmd['top5'])
+ # After switching to top processes we have to expand the list since there
+ # will be a lot of 'chrome' processes (up to 10, sometimes more) in the
+ # top.
+ # Let's limit the list size by the number of non-chrome processes.
+ limit_of_non_chrome_procs = 5
+ num_of_non_chrome_procs = 0
+ for topcmd in self.top_cmds:
+ print_line = '%20s %9.2f %6s %s' % (
+ topcmd['cmd'], topcmd['cpu_use_avg'], topcmd['count'],
+ topcmd['top5_cpu_use'])
format_list.append(print_line)
+ if not topcmd['cmd'].startswith('chrome'):
+ num_of_non_chrome_procs += 1
+ if num_of_non_chrome_procs >= limit_of_non_chrome_procs:
+ break
else:
format_list.append('[NO DATA FROM THE TOP LOG]')
format_list.append('-' * 50)
@@ -559,9 +571,9 @@ class Result(object):
Returns:
List of dictionaries with the following keyvals:
'cmd': command name (string),
- 'cpu_avg': average cpu usage (float),
+ 'cpu_use_avg': average cpu usage (float),
'count': number of occurrences (int),
- 'top5': up to 5 highest cpu usages (descending list of floats)
+ 'top5_cpu_use': up to 5 highest cpu usages (descending list of floats)
Example of the top log:
PID USER PR NI VIRT RES SHR S %CPU %MEM TIME+ COMMAND
@@ -612,7 +624,7 @@ class Result(object):
process = {
# NOTE: One command may be represented by multiple processes.
'cmd': match.group('cmd'),
- 'pid': int(match.group('pid')),
+ 'pid': match.group('pid'),
'cpu_use': float(match.group('cpu_use')),
}
@@ -634,7 +646,10 @@ class Result(object):
# not running.
# On 1-core DUT 90% chrome cpu load occurs in 55%, 95% in 33% and 100% in 2%
# of snapshots accordingly.
- CHROME_HIGH_CPU_LOAD = 90
+  # Threshold of "high load" was reduced to 70% (from 90) when we switched
+  # to per-process topstats. Per experiment data, the remaining 20% is
+  # distributed among other chrome processes.
+ CHROME_HIGH_CPU_LOAD = 70
# Number of snapshots where chrome is heavily used.
high_load_snapshots = 0
# Total CPU use per process in ALL active snapshots.
@@ -645,35 +660,41 @@ class Result(object):
topcmds = []
for snapshot_processes in snapshots:
- # CPU usage per command in one snapshot.
- cmd_cpu_use_per_snapshot = collections.defaultdict(float)
+ # CPU usage per command, per PID in one snapshot.
+ cmd_cpu_use_per_snapshot = collections.defaultdict(dict)
for process in snapshot_processes:
cmd = process['cmd']
cpu_use = process['cpu_use']
+ pid = process['pid']
+ cmd_cpu_use_per_snapshot[cmd][pid] = cpu_use
- # Collect CPU usage per command.
- cmd_cpu_use_per_snapshot[cmd] += cpu_use
+ # Chrome processes, pid: cpu_usage.
+ chrome_processes = cmd_cpu_use_per_snapshot.get('chrome', {})
+ chrome_cpu_use_list = chrome_processes.values()
- if cmd_cpu_use_per_snapshot.setdefault('chrome',
- 0.0) > CHROME_HIGH_CPU_LOAD:
- # Combined CPU usage of "chrome" command exceeds "High load" threshold
- # which means DUT is busy running a benchmark.
+ if chrome_cpu_use_list and max(
+ chrome_cpu_use_list) > CHROME_HIGH_CPU_LOAD:
+ # CPU usage of any of the "chrome" processes exceeds "High load"
+ # threshold which means DUT is busy running a benchmark.
high_load_snapshots += 1
- for cmd, cpu_use in cmd_cpu_use_per_snapshot.items():
- # Update total CPU usage.
- cmd_total_cpu_use[cmd] += cpu_use
+ for cmd, cpu_use_per_pid in cmd_cpu_use_per_snapshot.items():
+ for pid, cpu_use in cpu_use_per_pid.items():
+ # Append PID to the name of the command.
+ cmd_with_pid = cmd + '-' + pid
+ cmd_total_cpu_use[cmd_with_pid] += cpu_use
- # Add cpu_use into command top cpu usages, sorted in descending order.
- heapq.heappush(cmd_top5_cpu_use[cmd], round(cpu_use, 1))
+ # Add cpu_use into command top cpu usages, sorted in descending
+ # order.
+ heapq.heappush(cmd_top5_cpu_use[cmd_with_pid], round(cpu_use, 1))
for consumer, usage in sorted(
cmd_total_cpu_use.items(), key=lambda x: x[1], reverse=True):
# Iterate through commands by descending order of total CPU usage.
topcmd = {
'cmd': consumer,
- 'cpu_avg': usage / high_load_snapshots,
+ 'cpu_use_avg': usage / high_load_snapshots,
'count': len(cmd_top5_cpu_use[consumer]),
- 'top5': heapq.nlargest(5, cmd_top5_cpu_use[consumer]),
+ 'top5_cpu_use': heapq.nlargest(5, cmd_top5_cpu_use[consumer]),
}
topcmds.append(topcmd)
diff --git a/crosperf/results_cache_unittest.py b/crosperf/results_cache_unittest.py
index ed6ff95b..2c1996c4 100755
--- a/crosperf/results_cache_unittest.py
+++ b/crosperf/results_cache_unittest.py
@@ -223,40 +223,52 @@ TOP_LOG = \
"""
TOP_DATA = [
{
- 'cmd': 'chrome',
- 'cpu_avg': 124.75,
+ 'cmd': 'chrome-5745',
+ 'cpu_use_avg': 115.35,
'count': 2,
- 'top5': [125.7, 123.8],
+ 'top5_cpu_use': [122.8, 107.9],
},
{
- 'cmd': 'irq/cros-ec',
- 'cpu_avg': 1.0,
+ 'cmd': 'chrome-5713',
+ 'cpu_use_avg': 8.9,
'count': 1,
- 'top5': [2.0],
+ 'top5_cpu_use': [17.8]
},
{
- 'cmd': 'spi5',
- 'cpu_avg': 0.5,
+ 'cmd': 'irq/cros-ec-912',
+ 'cpu_use_avg': 1.0,
'count': 1,
- 'top5': [1.0],
+ 'top5_cpu_use': [2.0],
},
{
- 'cmd': 'sshd',
- 'cpu_avg': 0.5,
+ 'cmd': 'chrome-5205',
+ 'cpu_use_avg': 0.5,
'count': 1,
- 'top5': [1.0],
+ 'top5_cpu_use': [1.0]
},
{
- 'cmd': 'rcu_preempt',
- 'cpu_avg': 0.5,
+ 'cmd': 'spi5-121',
+ 'cpu_use_avg': 0.5,
'count': 1,
- 'top5': [1.0],
+ 'top5_cpu_use': [1.0],
},
{
- 'cmd': 'kworker/4:2',
- 'cpu_avg': 0.5,
+ 'cmd': 'sshd-4811',
+ 'cpu_use_avg': 0.5,
'count': 1,
- 'top5': [1.0],
+ 'top5_cpu_use': [1.0],
+ },
+ {
+ 'cmd': 'rcu_preempt-7',
+ 'cpu_use_avg': 0.5,
+ 'count': 1,
+ 'top5_cpu_use': [1.0],
+ },
+ {
+ 'cmd': 'kworker/4:2-855',
+ 'cpu_use_avg': 0.5,
+ 'count': 1,
+ 'top5_cpu_use': [1.0],
},
]
TOP_OUTPUT = \
@@ -994,69 +1006,84 @@ class ResultTest(unittest.TestCase):
mo.assert_has_calls(calls)
self.assertEqual(topcalls, [])
- def test_format_string_top5_cmds(self):
- """Test formatted string with top5 commands."""
+ def test_format_string_top_cmds(self):
+ """Test formatted string with top commands."""
self.result.top_cmds = [
{
- 'cmd': 'chrome',
- 'cpu_avg': 119.753453465,
+ 'cmd': 'chrome-111',
+ 'cpu_use_avg': 119.753453465,
'count': 44444,
- 'top5': [222.8, 217.9, 217.8, 191.0, 189.9],
+ 'top5_cpu_use': [222.8, 217.9, 217.8, 191.0, 189.9],
+ },
+ {
+ 'cmd': 'chrome-222',
+ 'cpu_use_avg': 100,
+ 'count': 33333,
+ 'top5_cpu_use': [200.0, 195.0, 190.0, 185.0, 180.0],
},
{
'cmd': 'irq/230-cros-ec',
- 'cpu_avg': 10.000000000000001,
+ 'cpu_use_avg': 10.000000000000001,
'count': 1000,
- 'top5': [11.5, 11.4, 11.3, 11.2, 11.1],
+ 'top5_cpu_use': [11.5, 11.4, 11.3, 11.2, 11.1],
},
{
'cmd': 'powerd',
- 'cpu_avg': 2.0,
+ 'cpu_use_avg': 2.0,
'count': 2,
- 'top5': [3.0, 1.0]
+ 'top5_cpu_use': [3.0, 1.0]
},
{
- 'cmd': 'cmd1',
- 'cpu_avg': 1.0,
+ 'cmd': 'cmd3',
+ 'cpu_use_avg': 1.0,
'count': 1,
- 'top5': [1.0],
+ 'top5_cpu_use': [1.0],
},
{
- 'cmd': 'cmd2',
- 'cpu_avg': 1.0,
+ 'cmd': 'cmd4',
+ 'cpu_use_avg': 1.0,
'count': 1,
- 'top5': [1.0],
+ 'top5_cpu_use': [1.0],
+ },
+ {
+ 'cmd': 'cmd5',
+ 'cpu_use_avg': 1.0,
+ 'count': 1,
+ 'top5_cpu_use': [1.0],
},
{
- 'cmd': 'not_for_print',
+ 'cmd': 'cmd6_not_for_print',
'cpu_avg': 1.0,
'count': 1,
'top5': [1.0],
},
]
- form_str = self.result.FormatStringTop5()
+ form_str = self.result.FormatStringTopCommands()
self.assertEqual(
form_str, '\n'.join([
- 'Top 5 commands with highest CPU usage:',
+ 'Top commands with highest CPU usage:',
' COMMAND AVG CPU% COUNT HIGHEST 5',
'-' * 50,
- ' chrome 119.75 44444 '
+ ' chrome-111 119.75 44444 '
'[222.8, 217.9, 217.8, 191.0, 189.9]',
+ ' chrome-222 100.00 33333 '
+ '[200.0, 195.0, 190.0, 185.0, 180.0]',
' irq/230-cros-ec 10.00 1000 '
'[11.5, 11.4, 11.3, 11.2, 11.1]',
' powerd 2.00 2 [3.0, 1.0]',
- ' cmd1 1.00 1 [1.0]',
- ' cmd2 1.00 1 [1.0]',
+ ' cmd3 1.00 1 [1.0]',
+ ' cmd4 1.00 1 [1.0]',
+ ' cmd5 1.00 1 [1.0]',
'-' * 50,
]))
- def test_format_string_top5_calls_no_data(self):
- """Test formatted string of top5 with no data."""
+ def test_format_string_top_calls_no_data(self):
+ """Test formatted string of top with no data."""
self.result.top_cmds = []
- form_str = self.result.FormatStringTop5()
+ form_str = self.result.FormatStringTopCommands()
self.assertEqual(
form_str, '\n'.join([
- 'Top 5 commands with highest CPU usage:',
+ 'Top commands with highest CPU usage:',
' COMMAND AVG CPU% COUNT HIGHEST 5',
'-' * 50,
'[NO DATA FROM THE TOP LOG]',