author     George Burgess IV <gbiv@google.com>    2016-08-08 16:14:24 -0700
committer  chrome-bot <chrome-bot@chromium.org>   2016-08-10 10:31:09 -0700
commit     e56ceb4e7387a7f7ccb6d8d3b03e3e9692453cbd (patch)
tree       eb962d053dfb0ff502972d558be48e3756f7eefb
parent     458dda2951fbffd8def8bc04b9568458fbe22e52 (diff)
download   toolchain-utils-e56ceb4e7387a7f7ccb6d8d3b03e3e9692453cbd.tar.gz
Random nit fixes in crosperf; NFC.
This also removes results_sorter.py, which seems to be unused and untested.

TEST=./run_tests.sh passes
BUG=None

Change-Id: I926affb23c78a8a0aedea4ab279da590f8cf93b5
Reviewed-on: https://chrome-internal-review.googlesource.com/273995
Commit-Ready: George Burgess <gbiv@google.com>
Tested-by: George Burgess <gbiv@google.com>
Reviewed-by: Caroline Tice <cmtice@google.com>
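A minimal sketch (not part of this patch, with illustrative names only) of the collections.defaultdict grouping pattern that the cleanup below adopts in experiment_status.py, replacing the manual "seed an empty list if the key is missing" bookkeeping:

    import collections

    # Hypothetical (label, benchmark) pairs standing in for benchmark_runs.
    runs = [('label1', 'bench_a'), ('label1', 'bench_b'), ('label2', 'bench_a')]

    grouped = collections.defaultdict(list)
    for label_name, benchmark_name in runs:
        # No "if label_name not in grouped: grouped[label_name] = []" needed;
        # defaultdict creates the empty list on first access.
        grouped[label_name].append(benchmark_name)

    for label_name, benchmarks in grouped.items():
        print('%s: %s' % (label_name, ', '.join(benchmarks)))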
-rw-r--r--  crosperf/benchmark_run.py      10
-rw-r--r--  crosperf/experiment.py          2
-rw-r--r--  crosperf/experiment_status.py  42
-rw-r--r--  crosperf/machine_manager.py    37
-rw-r--r--  crosperf/results_cache.py      40
-rw-r--r--  crosperf/results_sorter.py     50
-rw-r--r--  crosperf/schedv2.py            12
7 files changed, 64 insertions(+), 129 deletions(-)
diff --git a/crosperf/benchmark_run.py b/crosperf/benchmark_run.py
index d2a6f7d9..e53187e2 100644
--- a/crosperf/benchmark_run.py
+++ b/crosperf/benchmark_run.py
@@ -172,9 +172,7 @@ class BenchmarkRun(threading.Thread):
(self.name, machine.name,
datetime.datetime.now()))
break
- else:
- sleep_duration = 10
- time.sleep(sleep_duration)
+ time.sleep(10)
return machine
def GetExtraAutotestArgs(self):
@@ -207,9 +205,9 @@ class BenchmarkRun(threading.Thread):
else:
self.machine_manager.ImageMachine(machine, self.label)
self.timeline.Record(STATUS_RUNNING)
- [retval, out, err] = self.suite_runner.Run(machine.name, self.label,
- self.benchmark, self.test_args,
- self.profiler_args)
+ retval, out, err = self.suite_runner.Run(machine.name, self.label,
+ self.benchmark, self.test_args,
+ self.profiler_args)
self.run_completed = True
return Result.CreateFromRun(self._logger, self.log_level, self.label,
self.machine, out, err, retval,
diff --git a/crosperf/experiment.py b/crosperf/experiment.py
index 0a0f3fc0..dbcde213 100644
--- a/crosperf/experiment.py
+++ b/crosperf/experiment.py
@@ -119,7 +119,7 @@ class Experiment(object):
benchmark_runs = []
for label in self.labels:
for benchmark in self.benchmarks:
- for iteration in range(1, benchmark.iterations + 1):
+ for iteration in xrange(1, benchmark.iterations + 1):
benchmark_run_name = '%s: %s (%s)' % (label.name, benchmark.name,
iteration)
diff --git a/crosperf/experiment_status.py b/crosperf/experiment_status.py
index 8cada078..627db99e 100644
--- a/crosperf/experiment_status.py
+++ b/crosperf/experiment_status.py
@@ -1,8 +1,11 @@
-# Copyright 2011 Google Inc. All Rights Reserved.
+# Copyright 2011 The Chromium OS Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
"""The class to show the banner."""
from __future__ import print_function
+import collections
import datetime
import time
@@ -83,20 +86,17 @@ class ExperimentStatus(object):
def GetStatusString(self):
"""Get the status string of all the benchmark_runs."""
- status_bins = {}
+ status_bins = collections.defaultdict(list)
for benchmark_run in self.experiment.benchmark_runs:
- if benchmark_run.timeline.GetLastEvent() not in status_bins:
- status_bins[benchmark_run.timeline.GetLastEvent()] = []
status_bins[benchmark_run.timeline.GetLastEvent()].append(benchmark_run)
status_strings = []
- for key, val in status_bins.items():
+ for key, val in status_bins.iteritems():
if key == 'RUNNING':
- status_strings.append('%s: %s' %
- (key, self._GetNamesAndIterations(val)))
+ get_description = self._GetNamesAndIterations
else:
- status_strings.append('%s: %s' %
- (key, self._GetCompactNamesAndIterations(val)))
+ get_description = self._GetCompactNamesAndIterations
+ status_strings.append('%s: %s' % (key, get_description(val)))
thread_status = ''
thread_status_format = 'Thread Status: \n{}\n'
@@ -124,26 +124,22 @@ class ExperimentStatus(object):
return ' %s (%s)' % (len(strings), ', '.join(strings))
def _GetCompactNamesAndIterations(self, benchmark_runs):
- output = ''
- labels = {}
+ grouped_benchmarks = collections.defaultdict(list)
for benchmark_run in benchmark_runs:
- if benchmark_run.label.name not in labels:
- labels[benchmark_run.label.name] = []
+ grouped_benchmarks[benchmark_run.label.name].append(benchmark_run)
- for label in labels:
+ output_segs = []
+ for label_name, label_runs in grouped_benchmarks.iteritems():
strings = []
- benchmark_iterations = {}
- for benchmark_run in benchmark_runs:
- if benchmark_run.label.name != label:
- continue
+ benchmark_iterations = collections.defaultdict(list)
+ for benchmark_run in label_runs:
+ assert benchmark_run.label.name == label_name
benchmark_name = benchmark_run.benchmark.name
- if benchmark_name not in benchmark_iterations:
- benchmark_iterations[benchmark_name] = []
benchmark_iterations[benchmark_name].append(benchmark_run.iteration)
- for key, val in benchmark_iterations.items():
+ for key, val in benchmark_iterations.iteritems():
val.sort()
iterations = ','.join(map(str, val))
strings.append('{} [{}]'.format(key, iterations))
- output += ' ' + label + ': ' + ', '.join(strings) + '\n'
+ output_segs.append(' ' + label_name + ': ' + ', '.join(strings) + '\n')
- return ' %s \n%s' % (len(benchmark_runs), output)
+ return ' %s \n%s' % (len(benchmark_runs), ''.join(output_segs))
diff --git a/crosperf/machine_manager.py b/crosperf/machine_manager.py
index 0ee17c53..2fdf141b 100644
--- a/crosperf/machine_manager.py
+++ b/crosperf/machine_manager.py
@@ -140,7 +140,7 @@ class CrosMachine(object):
self.checksum_string = ''
exclude_lines_list = ['MHz', 'BogoMIPS', 'bogomips']
for line in self.cpuinfo.splitlines():
- if not any([e in line for e in exclude_lines_list]):
+ if not any(e in line for e in exclude_lines_list):
self.checksum_string += line
self.checksum_string += ' ' + str(self.phys_kbytes)
@@ -406,33 +406,28 @@ class MachineManager(object):
m.released_time = time.time()
if self.GetAvailableMachines(label):
break
- else:
- sleep_time = max(1, min(self.acquire_timeout, check_interval_time))
- time.sleep(sleep_time)
- self.acquire_timeout -= sleep_time
+ sleep_time = max(1, min(self.acquire_timeout, check_interval_time))
+ time.sleep(sleep_time)
+ self.acquire_timeout -= sleep_time
if self.acquire_timeout < 0:
- machine_names = []
- for machine in machines:
- machine_names.append(machine.name)
self.logger.LogFatal('Could not acquire any of the '
"following machines: '%s'" %
- ', '.join(machine_names))
+ ', '.join(machine.name for machine in machines))
### for m in self._machines:
### if (m.locked and time.time() - m.released_time < 10 and
### m.checksum == image_checksum):
### return None
- for m in [machine
- for machine in self.GetAvailableMachines(label)
- if not machine.locked]:
- if image_checksum and (m.checksum == image_checksum):
+ unlocked_machines = [machine
+ for machine in self.GetAvailableMachines(label)
+ if not machine.locked]
+ for m in unlocked_machines:
+ if image_checksum and m.checksum == image_checksum:
m.locked = True
m.test_run = threading.current_thread()
return m
- for m in [machine
- for machine in self.GetAvailableMachines(label)
- if not machine.locked]:
+ for m in unlocked_machines:
if not m.checksum:
m.locked = True
m.test_run = threading.current_thread()
@@ -443,9 +438,7 @@ class MachineManager(object):
# the number of re-images.
# TODO(asharif): If we centralize the thread-scheduler, we wont need this
# code and can implement minimal reimaging code more cleanly.
- for m in [machine
- for machine in self.GetAvailableMachines(label)
- if not machine.locked]:
+ for m in unlocked_machines:
if time.time() - m.released_time > 15:
# The release time gap is too large, so it is probably in the start
# stage, we need to reset the released_time.
@@ -470,7 +463,7 @@ class MachineManager(object):
with self._lock:
for m in self._machines:
if machine.name == m.name:
- assert m.locked == True, 'Tried to double-release %s' % m.name
+ assert m.locked, 'Tried to double-release %s' % m.name
m.released_time = time.time()
m.locked = False
m.status = 'Available'
@@ -487,9 +480,7 @@ class MachineManager(object):
def __str__(self):
with self._lock:
- l = ['MachineManager Status:']
- for m in self._machines:
- l.append(str(m))
+ l = ['MachineManager Status:'] + [str(m) for m in self._machines]
return '\n'.join(l)
def AsString(self):
diff --git a/crosperf/results_cache.py b/crosperf/results_cache.py
index 8bd23181..a06a9b62 100644
--- a/crosperf/results_cache.py
+++ b/crosperf/results_cache.py
@@ -304,20 +304,19 @@ class Result(object):
if not filename.endswith('.json'):
raise IOError('Attempt to call json on non-json file: %s' % filename)
- keyvals = dict()
- if os.path.exists(filename):
- raw_dict = dict()
- with open(filename, 'r') as f:
- raw_dict = json.load(f)
- for k in raw_dict:
- field_dict = raw_dict[k]
- for item in field_dict:
- keyname = k + "__" + item
- value_dict = field_dict[item]
- result = value_dict['value']
- units = value_dict['units']
- new_value = [ result, units ]
- keyvals[keyname] = new_value
+ if not os.path.exists(filename):
+ return {}
+
+ keyvals = {}
+ with open(filename, 'r') as f:
+ raw_dict = json.load(f)
+ for k, field_dict in raw_dict.iteritems():
+ for item, value_dict in field_dict.iteritems():
+ keyname = k + "__" + item
+ result = value_dict['value']
+ units = value_dict['units']
+ new_value = [result, units]
+ keyvals[keyname] = new_value
return keyvals
def ProcessResults(self):
@@ -343,9 +342,9 @@ class Result(object):
with open(keys_file, 'r') as f:
lines = f.readlines()
for l in lines:
- if l.find('Google Chrome ') == 0:
+ if l.startswith('Google Chrome '):
chrome_version = l
- if chrome_version[-1] == '\n':
+ if chrome_version.endswith('\n'):
chrome_version = chrome_version[:-1]
break
return chrome_version
@@ -515,7 +514,7 @@ class TelemetryResult(Result):
fields = line.split(',')
if len(fields) != len(labels):
continue
- for i in range(1, len(labels)):
+ for i in xrange(1, len(labels)):
key = '%s %s' % (fields[0], labels[i])
value = fields[i]
self.keyvals[key] = value
@@ -624,8 +623,7 @@ class ResultsCache(object):
if matching_dirs:
# Cache file found.
return matching_dirs[0]
- else:
- return None
+ return None
def GetCacheDirForWrite(self, get_keylist=False):
cache_path = self.FormCacheDir(self.GetCacheKeyList(False))[0]
@@ -690,7 +688,7 @@ class ResultsCache(object):
temp_test_args = '%s %s %s' % (self.test_args, self.profiler_args,
self.run_local)
- test_args_checksum = hashlib.md5(''.join(temp_test_args)).hexdigest()
+ test_args_checksum = hashlib.md5(temp_test_args).hexdigest()
return (image_path_checksum, self.test_name, str(self.iteration),
test_args_checksum, checksum, machine_checksum, machine_id_checksum,
str(self.CACHE_VERSION))
@@ -698,7 +696,7 @@ class ResultsCache(object):
def ReadResult(self):
if CacheConditions.FALSE in self.cache_conditions:
cache_dir = self.GetCacheDirForWrite()
- command = 'rm -rf {0}'.format(cache_dir)
+ command = 'rm -rf %s' % (cache_dir, )
self.ce.RunCommand(command)
return None
cache_dir = self.GetCacheDirForRead()
diff --git a/crosperf/results_sorter.py b/crosperf/results_sorter.py
deleted file mode 100644
index 1ebbb8b4..00000000
--- a/crosperf/results_sorter.py
+++ /dev/null
@@ -1,50 +0,0 @@
-# Copyright 2011 Google Inc. All Rights Reserved.
-"""Module to sort the results."""
-
-
-class ResultSorter(object):
- """Class to sort the results."""
-
- def __init__(self, benchmark_runs):
- self.table = {}
- for benchmark_run in benchmark_runs:
- benchmark_name = benchmark_run.benchmark_name
- label_name = benchmark_run.label_name
- if not benchmark_run.result:
- continue
- for autotest_key in benchmark_run.result.keyvals:
- result_tuple = (benchmark_name, autotest_key, label_name)
- if result_tuple not in self.table:
- self.table[result_tuple] = []
-
- cell = self.table[result_tuple]
- index = benchmark_run.iteration - 1
- while index >= len(cell):
- cell.append(None)
-
- result_value = benchmark_run.result.keyvals[autotest_key]
- try:
- result_value = float(result_value)
- except ValueError:
- pass
-
- cell[index] = result_value
-
- self.autotest_keys = {}
- for benchmark_run in benchmark_runs:
- benchmark_name = benchmark_run.benchmark_name
- if benchmark_name not in self.autotest_keys:
- self.autotest_keys[benchmark_name] = {}
- if not benchmark_run.result:
- continue
- for autotest_key in benchmark_run.result.keyvals:
- self.autotest_keys[benchmark_name][autotest_key] = True
-
- def GetAutotestKeys(self, benchmark_name):
- return self.autotest_keys[benchmark_name].keys()
-
- def GetResults(self, benchmark_name, autotest_key, label_name):
- try:
- return self.table[(benchmark_name, autotest_key, label_name)]
- except KeyError:
- return []
diff --git a/crosperf/schedv2.py b/crosperf/schedv2.py
index 7e5bd8c9..90fe83a3 100644
--- a/crosperf/schedv2.py
+++ b/crosperf/schedv2.py
@@ -1,5 +1,6 @@
-
-# Copyright 2015 Google Inc. All Rights Reserved.
+# Copyright 2015 The Chromium OS Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
"""Module to optimize the scheduling of benchmark_run tasks."""
@@ -241,7 +242,7 @@ class Schedv2(object):
self._read_br_cache()
# Mapping from label to a list of benchmark_runs.
- self._label_brl_map = dict([(l, []) for l in self._labels])
+ self._label_brl_map = dict((l, []) for l in self._labels)
for br in self._experiment.benchmark_runs:
assert br.label in self._label_brl_map
# Only put no-cache-hit br into the map.
@@ -262,7 +263,8 @@ class Schedv2(object):
def run_sched(self):
"""Start all dut worker threads and return immediately."""
- _ = [w.start() for w in self._active_workers]
+ for w in self._active_workers:
+ w.start()
def _read_br_cache(self):
"""Use multi-threading to read cache for all benchmarkruns.
@@ -297,7 +299,7 @@ class Schedv2(object):
(n_threads - 1) * benchmarkruns_per_thread:])
# Assert: aggregation of benchmarkrun_segments equals to benchmark_runs.
- assert sum([len(x) for x in benchmarkrun_segments]) == n_benchmarkruns
+ assert sum(len(x) for x in benchmarkrun_segments) == n_benchmarkruns
# Create and start all readers.
cache_readers = [