aboutsummaryrefslogtreecommitdiff
path: root/crosperf
diff options
context:
space:
mode:
authorLuis Lozano <llozano@chromium.org>2013-03-15 14:44:13 -0700
committerChromeBot <chrome-bot@google.com>2013-03-15 15:51:37 -0700
commitf81680c018729fd4499e1e200d04b48c4b90127c (patch)
tree940608da8374604b82edfdb2d7df55d065f05d4c /crosperf
parent2296ee0b914aba5bba07becab4ff68884ce9b8a5 (diff)
downloadtoolchain-utils-f81680c018729fd4499e1e200d04b48c4b90127c.tar.gz
Cleaned up directory after copy of tools from perforce directory
Got rid of stale copies of some tools like "crosperf" and moved all files under v14 directory (that came from perforce) into the top directory. BUG=None TEST=None Change-Id: I408d17a36ceb00e74db71403d2351fd466a14f8e Reviewed-on: https://gerrit-int.chromium.org/33887 Tested-by: Luis Lozano <llozano@chromium.org> Reviewed-by: Yunlian Jiang <yunlian@google.com> Commit-Queue: Luis Lozano <llozano@chromium.org>
Diffstat (limited to 'crosperf')
-rw-r--r--crosperf/benchmark.py4
-rw-r--r--crosperf/benchmark_run.py7
-rwxr-xr-xcrosperf/benchmark_run_unittest.py12
-rw-r--r--crosperf/config.py17
-rwxr-xr-xcrosperf/crosperf.py13
-rw-r--r--crosperf/experiment.py23
-rw-r--r--crosperf/experiment_factory.py27
-rwxr-xr-xcrosperf/experiment_factory_unittest.py4
-rw-r--r--crosperf/experiment_runner.py8
-rw-r--r--crosperf/image_checksummer.py38
-rw-r--r--crosperf/label.py8
-rw-r--r--crosperf/machine_manager.py47
-rwxr-xr-xcrosperf/machine_manager_unittest.py8
-rw-r--r--crosperf/perf_table.py39
-rw-r--r--crosperf/results_cache.py84
-rw-r--r--crosperf/results_organizer.py13
-rw-r--r--crosperf/results_report.py15
-rw-r--r--crosperf/settings_factory.py29
18 files changed, 292 insertions, 104 deletions
diff --git a/crosperf/benchmark.py b/crosperf/benchmark.py
index bc7f1fa8..8fe8a492 100644
--- a/crosperf/benchmark.py
+++ b/crosperf/benchmark.py
@@ -14,11 +14,13 @@ class Benchmark(object):
"""
def __init__(self, name, autotest_name, autotest_args, iterations,
- outlier_range, perf_args):
+ outlier_range, key_results_only, rm_chroot_tmp, perf_args):
self.name = name
self.autotest_name = autotest_name
self.autotest_args = autotest_args
self.iterations = iterations
self.outlier_range = outlier_range
self.perf_args = perf_args
+ self.key_results_only = key_results_only
+ self.rm_chroot_tmp = rm_chroot_tmp
self.iteration_adjusted = False
diff --git a/crosperf/benchmark_run.py b/crosperf/benchmark_run.py
index dc837937..80c95c4d 100644
--- a/crosperf/benchmark_run.py
+++ b/crosperf/benchmark_run.py
@@ -29,7 +29,8 @@ class BenchmarkRun(threading.Thread):
iteration,
cache_conditions,
machine_manager,
- logger_to_use):
+ logger_to_use,
+ share_users):
threading.Thread.__init__(self)
self.name = name
self._logger = logger_to_use
@@ -53,6 +54,7 @@ class BenchmarkRun(threading.Thread):
self._ce = command_executer.GetCommandExecuter(self._logger)
self.timeline = timeline.Timeline()
self.timeline.Record(STATUS_PENDING)
+ self.share_users = share_users
def run(self):
try:
@@ -67,7 +69,8 @@ class BenchmarkRun(threading.Thread):
self.label.board,
self.cache_conditions,
self._logger,
- self.label
+ self.label,
+ self.share_users
)
self.result = self.cache.ReadResult()
diff --git a/crosperf/benchmark_run_unittest.py b/crosperf/benchmark_run_unittest.py
index c4670c9c..47e027f4 100755
--- a/crosperf/benchmark_run_unittest.py
+++ b/crosperf/benchmark_run_unittest.py
@@ -19,14 +19,19 @@ from results_cache import MockResultsCache
class BenchmarkRunTest(unittest.TestCase):
def testDryRun(self):
my_label = MockLabel("test1", "image1", "/tmp/test_benchmark_run",
- "x86-alex", "chromeos-alex1", "")
- m = MockMachineManager("/tmp/chromeos_root")
+ "x86-alex", "chromeos-alex1",
+ image_args="",
+ image_md5sum="",
+ cache_dir="")
+ m = MockMachineManager("/tmp/chromeos_root", 0)
m.AddMachine("chromeos-alex1")
bench = Benchmark("PageCyler",
"Pyautoperf",
"",
1,
0.2,
+ False,
+ False,
"")
b = MockBenchmarkRun("test run",
bench,
@@ -34,7 +39,8 @@ class BenchmarkRunTest(unittest.TestCase):
1,
[],
m,
- logger.GetLogger())
+ logger.GetLogger(),
+ "")
b.cache = MockResultsCache()
b.autotest_runner = MockAutotestRunner()
b.start()
diff --git a/crosperf/config.py b/crosperf/config.py
new file mode 100644
index 00000000..45a3d000
--- /dev/null
+++ b/crosperf/config.py
@@ -0,0 +1,17 @@
+#!/usr/bin/python
+#
+# Copyright 2011 Google Inc. All Rights Reserved.
+
+"""A configure file."""
+config = {}
+
+
+def GetConfig(key):
+ try:
+ return config[key]
+ except KeyError:
+ return None
+
+
+def AddConfig(key, value):
+ config[key] = value
diff --git a/crosperf/crosperf.py b/crosperf/crosperf.py
index cfb48d7c..cb7911fd 100755
--- a/crosperf/crosperf.py
+++ b/crosperf/crosperf.py
@@ -18,8 +18,6 @@ from utils import logger
import test_flag
-l = logger.GetLogger()
-
class MyIndentedHelpFormatter(optparse.IndentedHelpFormatter):
def format_description(self, description):
@@ -65,12 +63,20 @@ def Main(argv):
description=Help().GetHelp(),
formatter=MyIndentedHelpFormatter(),
version="%prog 0.1")
+
+ parser.add_option("-l", "--log_dir",
+ dest="log_dir",
+ default="",
+ help="The log_dir, default is under <crosperf_logs>/logs")
+
SetupParserOptions(parser)
options, args = parser.parse_args(argv)
# Convert the relevant options that are passed in into a settings
# object which will override settings in the experiment file.
option_settings = ConvertOptionsToSettings(options)
+ log_dir = os.path.abspath(os.path.expanduser(options.log_dir))
+ logger.GetLogger(log_dir)
if len(args) == 2:
experiment_filename = args[1]
@@ -87,7 +93,8 @@ def Main(argv):
experiment_name = os.path.basename(experiment_filename)
experiment_file.GetGlobalSettings().SetField("name", experiment_name)
experiment = ExperimentFactory().GetExperiment(experiment_file,
- working_directory)
+ working_directory,
+ log_dir)
atexit.register(Cleanup, experiment)
diff --git a/crosperf/experiment.py b/crosperf/experiment.py
index e9dc3d07..2a4590c4 100644
--- a/crosperf/experiment.py
+++ b/crosperf/experiment.py
@@ -9,23 +9,20 @@ import time
from utils import logger
-from autotest_runner import AutotestRunner
from benchmark_run import BenchmarkRun
from machine_manager import MachineManager
from machine_manager import MockMachineManager
-from results_cache import ResultsCache
-from results_report import HTMLResultsReport
import test_flag
class Experiment(object):
"""Class representing an Experiment to be run."""
- def __init__(self, name, remote, rerun_if_failed, working_directory,
+ def __init__(self, name, remote, working_directory,
chromeos_root, cache_conditions, labels, benchmarks,
- experiment_file, email_to):
+ experiment_file, email_to, acquire_timeout, log_dir,
+ share_users):
self.name = name
- self.rerun_if_failed = rerun_if_failed
self.working_directory = working_directory
self.remote = remote
self.chromeos_root = chromeos_root
@@ -34,11 +31,12 @@ class Experiment(object):
self.email_to = email_to
self.results_directory = os.path.join(self.working_directory,
self.name + "_results")
-
+ self.log_dir = log_dir
self.labels = labels
self.benchmarks = benchmarks
self.num_complete = 0
self.num_run_complete = 0
+ self.share_users = share_users
# We need one chromeos_root to run the benchmarks in, but it doesn't
# matter where it is, unless the ABIs are different.
@@ -51,10 +49,10 @@ class Experiment(object):
"the image path.")
if test_flag.GetTestMode():
- self.machine_manager = MockMachineManager(chromeos_root)
+ self.machine_manager = MockMachineManager(chromeos_root, acquire_timeout)
else:
- self.machine_manager = MachineManager(chromeos_root)
- self.l = logger.GetLogger()
+ self.machine_manager = MachineManager(chromeos_root, acquire_timeout)
+ self.l = logger.GetLogger(log_dir)
for machine in remote:
self.machine_manager.AddMachine(machine)
@@ -75,7 +73,7 @@ class Experiment(object):
benchmark_run_name = "%s: %s (%s)" % (label.name, benchmark.name,
iteration)
full_name = "%s_%s_%s" % (label.name, benchmark.name, iteration)
- logger_to_use = logger.Logger(os.path.dirname(__file__),
+ logger_to_use = logger.Logger(self.log_dir,
"run.%s" % (full_name),
True)
benchmark_run = BenchmarkRun(benchmark_run_name,
@@ -84,7 +82,8 @@ class Experiment(object):
iteration,
self.cache_conditions,
self.machine_manager,
- logger_to_use)
+ logger_to_use,
+ self.share_users)
benchmark_runs.append(benchmark_run)
return benchmark_runs
diff --git a/crosperf/experiment_factory.py b/crosperf/experiment_factory.py
index bd3076dd..3c92ee3e 100644
--- a/crosperf/experiment_factory.py
+++ b/crosperf/experiment_factory.py
@@ -7,6 +7,7 @@ import os
import socket
from benchmark import Benchmark
+import config
from experiment import Experiment
from label import Label
from label import MockLabel
@@ -22,13 +23,18 @@ class ExperimentFactory(object):
of experiments could be produced.
"""
- def GetExperiment(self, experiment_file, working_directory):
+ def GetExperiment(self, experiment_file, working_directory, log_dir):
"""Construct an experiment from an experiment file."""
global_settings = experiment_file.GetGlobalSettings()
experiment_name = global_settings.GetField("name")
remote = global_settings.GetField("remote")
- rerun_if_failed = global_settings.GetField("rerun_if_failed")
chromeos_root = global_settings.GetField("chromeos_root")
+ rm_chroot_tmp = global_settings.GetField("rm_chroot_tmp")
+ key_results_only = global_settings.GetField("key_results_only")
acquire_timeout = global_settings.GetField("acquire_timeout")
+ cache_dir = global_settings.GetField("cache_dir")
+ config.AddConfig("no_email", global_settings.GetField("no_email"))
+ share_users = global_settings.GetField("share_users")
# Default cache hit conditions. The image checksum in the cache and the
# computed checksum of the image must match. Also a cache file must exist.
@@ -55,8 +61,13 @@ class ExperimentFactory(object):
iterations = benchmark_settings.GetField("iterations")
outlier_range = benchmark_settings.GetField("outlier_range")
perf_args = benchmark_settings.GetField("perf_args")
+ rm_chroot_tmp = benchmark_settings.GetField("rm_chroot_tmp")
+ key_results_only = benchmark_settings.GetField("key_results_only")
+
benchmark = Benchmark(benchmark_name, autotest_name, autotest_args,
- iterations, outlier_range, perf_args)
+ iterations, outlier_range,
+ key_results_only, rm_chroot_tmp,
+ perf_args)
benchmarks.append(benchmark)
# Construct labels.
@@ -69,6 +80,8 @@ class ExperimentFactory(object):
chromeos_root = label_settings.GetField("chromeos_root")
board = label_settings.GetField("board")
my_remote = label_settings.GetField("remote")
+ image_md5sum = label_settings.GetField("md5sum")
+ cache_dir = label_settings.GetField("cache_dir")
# TODO(yunlian): We should consolidate code in machine_manager.py
# to determine whether we are running from within google or not
if ("corp.google.com" in socket.gethostname() and
@@ -83,19 +96,19 @@ class ExperimentFactory(object):
image_args = label_settings.GetField("image_args")
if test_flag.GetTestMode():
label = MockLabel(label_name, image, chromeos_root, board, my_remote,
- image_args)
+ image_args, image_md5sum, cache_dir)
else:
label = Label(label_name, image, chromeos_root, board, my_remote,
- image_args)
+ image_args, image_md5sum, cache_dir)
labels.append(label)
email = global_settings.GetField("email")
all_remote = list(set(all_remote))
- experiment = Experiment(experiment_name, all_remote, rerun_if_failed,
+ experiment = Experiment(experiment_name, all_remote,
working_directory, chromeos_root,
cache_conditions, labels, benchmarks,
experiment_file.Canonicalize(),
- email)
+ email, acquire_timeout, log_dir, share_users)
return experiment
diff --git a/crosperf/experiment_factory_unittest.py b/crosperf/experiment_factory_unittest.py
index fa943519..6cee6b74 100755
--- a/crosperf/experiment_factory_unittest.py
+++ b/crosperf/experiment_factory_unittest.py
@@ -32,7 +32,9 @@ EXPERIMENT_FILE_1 = """
class ExperimentFactoryTest(unittest.TestCase):
def testLoadExperimentFile1(self):
experiment_file = ExperimentFile(StringIO.StringIO(EXPERIMENT_FILE_1))
- experiment = ExperimentFactory().GetExperiment(experiment_file, "")
+ experiment = ExperimentFactory().GetExperiment(experiment_file,
+ working_directory="",
+ log_dir="")
self.assertEqual(experiment.remote, ["chromeos-alex3"])
self.assertEqual(len(experiment.benchmarks), 1)
diff --git a/crosperf/experiment_runner.py b/crosperf/experiment_runner.py
index b905bbdc..9212ba56 100644
--- a/crosperf/experiment_runner.py
+++ b/crosperf/experiment_runner.py
@@ -12,6 +12,7 @@ from utils import logger
from utils.email_sender import EmailSender
from utils.file_utils import FileUtils
+import config
from experiment_status import ExperimentStatus
from results_report import HTMLResultsReport
from results_report import TextResultsReport
@@ -25,7 +26,7 @@ class ExperimentRunner(object):
def __init__(self, experiment):
self._experiment = experiment
- self.l = logger.GetLogger()
+ self.l = logger.GetLogger(experiment.log_dir)
self._ce = command_executer.GetCommandExecuter(self.l)
self._terminated = False
@@ -58,7 +59,8 @@ class ExperimentRunner(object):
if not benchmark_run.cache_hit:
send_mail = True
break
- if not send_mail and not experiment.email_to:
+ if (not send_mail and not experiment.email_to
+ or config.GetConfig("no_email")):
return
label_names = []
@@ -100,7 +102,7 @@ class ExperimentRunner(object):
benchmark_run_path = os.path.join(results_directory,
benchmark_run_name)
benchmark_run.result.CopyResultsTo(benchmark_run_path)
- benchmark_run.result.CleanUp()
+ benchmark_run.result.CleanUp(benchmark_run.benchmark.rm_chroot_tmp)
def Run(self):
self._Run(self._experiment)
diff --git a/crosperf/image_checksummer.py b/crosperf/image_checksummer.py
index f75dc944..dcc1cb02 100644
--- a/crosperf/image_checksummer.py
+++ b/crosperf/image_checksummer.py
@@ -2,24 +2,38 @@
# Copyright 2011 Google Inc. All Rights Reserved.
+import os
import threading
+
from utils import logger
from utils.file_utils import FileUtils
class ImageChecksummer(object):
class PerImageChecksummer(object):
- def __init__(self, filename):
+ def __init__(self, label):
self._lock = threading.Lock()
- self.filename = filename
+ self.label = label
self._checksum = None
def Checksum(self):
with self._lock:
if not self._checksum:
- logger.GetLogger().LogOutput("Computing checksum for '%s'." %
- self.filename)
- self._checksum = FileUtils().Md5File(self.filename)
+ logger.GetLogger().LogOutput("Acquiring checksum for '%s'." %
+ self.label.name)
+ self._checksum = None
+ if self.label.chromeos_image:
+ if os.path.exists(self.label.chromeos_image):
+ self._checksum = FileUtils().Md5File(self.label.chromeos_image)
+ logger.GetLogger().LogOutput("Computed checksum is "
+ ": %s" % self._checksum)
+ if not self._checksum:
+ if self.label.image_md5sum:
+ self._checksum = self.label.image_md5sum
+ logger.GetLogger().LogOutput("Checksum in experiment file is "
+ ": %s" % self._checksum)
+ else:
+ raise Exception("Checksum computing error.")
logger.GetLogger().LogOutput("Checksum is: %s" % self._checksum)
return self._checksum
@@ -34,16 +48,16 @@ class ImageChecksummer(object):
*args, **kwargs)
return cls._instance
- def Checksum(self, filename):
+ def Checksum(self, label):
with self._lock:
- if filename not in self._per_image_checksummers:
- self._per_image_checksummers[filename] = (ImageChecksummer.
- PerImageChecksummer(filename))
- checksummer = self._per_image_checksummers[filename]
+ if label.name not in self._per_image_checksummers:
+ self._per_image_checksummers[label.name] = (ImageChecksummer.
+ PerImageChecksummer(label))
+ checksummer = self._per_image_checksummers[label.name]
try:
return checksummer.Checksum()
except Exception, e:
- logger.GetLogger().LogError("Could not compute checksum of file '%s'."
- % filename)
+ logger.GetLogger().LogError("Could not compute checksum of image in label"
+ " '%s'."% label.name)
raise e
diff --git a/crosperf/label.py b/crosperf/label.py
index 64ce352f..be7a868e 100644
--- a/crosperf/label.py
+++ b/crosperf/label.py
@@ -10,7 +10,7 @@ from utils.file_utils import FileUtils
class Label(object):
def __init__(self, name, chromeos_image, chromeos_root, board, remote,
- image_args):
+ image_args, image_md5sum, cache_dir):
# Expand ~
chromeos_root = os.path.expanduser(chromeos_root)
chromeos_image = os.path.expanduser(chromeos_image)
@@ -20,6 +20,8 @@ class Label(object):
self.board = board
self.remote = remote
self.image_args = image_args
+ self.image_md5sum = image_md5sum
+ self.cache_dir = cache_dir
if not chromeos_root:
chromeos_root = FileUtils().ChromeOSRootFromImage(chromeos_image)
@@ -38,13 +40,15 @@ class Label(object):
class MockLabel(object):
def __init__(self, name, chromeos_image, chromeos_root, board, remote,
- image_args):
+ image_args, image_md5sum, cache_dir):
self.name = name
self.chromeos_image = chromeos_image
self.board = board
self.remote = remote
+ self.cache_dir = cache_dir
if not chromeos_root:
self.chromeos_root = "/tmp/chromeos_root"
else:
self.chromeos_root = chromeos_root
self.image_args = image_args
+ self.image_md5sum = image_md5sum
diff --git a/crosperf/machine_manager.py b/crosperf/machine_manager.py
index 9eb9bcdf..29a4df7a 100644
--- a/crosperf/machine_manager.py
+++ b/crosperf/machine_manager.py
@@ -111,12 +111,12 @@ class CrosMachine(object):
def _GetMachineID(self):
ce = command_executer.GetCommandExecuter()
- command = "ifconfig"
+ command = "dump_vpd_log --full --stdout"
ret, if_out, _ = ce.CrosRunCommand(
command, return_output=True,
machine=self.name, chromeos_root=self.chromeos_root)
b = if_out.splitlines()
- a = [l for l in b if "lan" in l]
+ a = [l for l in b if "Product" in l]
self.machine_id = a[0]
assert ret == 0, "Could not get machine_id from machine: %s" % self.name
@@ -131,7 +131,7 @@ class CrosMachine(object):
class MachineManager(object):
- def __init__(self, chromeos_root):
+ def __init__(self, chromeos_root, acquire_timeout):
self._lock = threading.RLock()
self._all_machines = []
self._machines = []
@@ -140,6 +140,7 @@ class MachineManager(object):
self.chromeos_root = None
self.machine_checksum = {}
self.machine_checksum_string = {}
+ self.acquire_timeout = acquire_timeout
if os.path.isdir(lock_machine.Machine.LOCKS_DIR):
self.no_lock = False
@@ -149,7 +150,7 @@ class MachineManager(object):
self.chromeos_root = chromeos_root
def ImageMachine(self, machine, label):
- checksum = ImageChecksummer().Checksum(label.chromeos_image)
+ checksum = ImageChecksummer().Checksum(label)
if machine.checksum == checksum:
return
chromeos_root = label.chromeos_root
@@ -193,8 +194,8 @@ class MachineManager(object):
with self._lock:
assert cros_machine, "Machine can't be None"
for m in self._machines:
- assert m.name != cros_machine.name, (
- "Tried to double-lock %s" % cros_machine.name)
+ if m.name == cros_machine.name:
+ return
if self.no_lock:
locked = True
else:
@@ -226,25 +227,33 @@ class MachineManager(object):
return len(set(checksums)) == 1
def AcquireMachine(self, chromeos_image, label):
- image_checksum = ImageChecksummer().Checksum(chromeos_image)
+ image_checksum = ImageChecksummer().Checksum(label)
machines = self.GetMachines(label)
+ check_interval_time = 120
with self._lock:
# Lazily external lock machines
-
- for m in machines:
- if m not in self._initialized_machines:
- self._initialized_machines.append(m)
+ while self.acquire_timeout >= 0:
+ for m in machines:
+ new_machine = m not in self._all_machines
self._TryToLockMachine(m)
- m.released_time = time.time()
- if not self.AreAllMachineSame(label):
- logger.GetLogger().LogFatal("-- not all the machine are identical")
- if not self.GetAvailableMachines(label):
+ if new_machine:
+ m.released_time = time.time()
+ if not self.AreAllMachineSame(label):
+ logger.GetLogger().LogFatal("-- not all the machine are identical")
+ if self.GetAvailableMachines(label):
+ break
+ else:
+ sleep_time = max(1, min(self.acquire_timeout, check_interval_time))
+ time.sleep(sleep_time)
+ self.acquire_timeout -= sleep_time
+
+ if self.acquire_timeout < 0:
machine_names = []
for machine in machines:
machine_names.append(machine.name)
logger.GetLogger().LogFatal("Could not acquire any of the "
- "following machines: '%s'"
- % ", ".join(machine_names))
+ "following machines: '%s'"
+ % ", ".join(machine_names))
### for m in self._machines:
### if (m.locked and time.time() - m.released_time < 10 and
@@ -374,8 +383,8 @@ class MockCrosMachine(CrosMachine):
class MockMachineManager(MachineManager):
- def __init__(self, chromeos_root):
- super(MockMachineManager, self).__init__(chromeos_root)
+ def __init__(self, chromeos_root, acquire_timeout):
+ super(MockMachineManager, self).__init__(chromeos_root, acquire_timeout)
def _TryToLockMachine(self, cros_machine):
self._machines.append(cros_machine)
diff --git a/crosperf/machine_manager_unittest.py b/crosperf/machine_manager_unittest.py
index 98baf456..84266d5e 100755
--- a/crosperf/machine_manager_unittest.py
+++ b/crosperf/machine_manager_unittest.py
@@ -12,7 +12,7 @@ import machine_manager
class MyMachineManager(machine_manager.MachineManager):
def __init__(self, chromeos_root):
- super(MyMachineManager, self).__init__(chromeos_root)
+ super(MyMachineManager, self).__init__(chromeos_root, 0)
def _TryToLockMachine(self, cros_machine):
self._machines.append(cros_machine)
@@ -30,9 +30,11 @@ class MyMachineManager(machine_manager.MachineManager):
CHROMEOS_ROOT = "/tmp/chromeos-root"
MACHINE_NAMES = ["lumpy1", "lumpy2", "lumpy3", "daisy1", "daisy2"]
LABEL_LUMPY = label.MockLabel("lumpy", "image", CHROMEOS_ROOT, "lumpy",
- ["lumpy1", "lumpy2", "lumpy3", "lumpy4"], "")
+ ["lumpy1", "lumpy2", "lumpy3", "lumpy4"],
+ "", "", "")
LABEL_MIX = label.MockLabel("mix", "image", CHROMEOS_ROOT, "mix",
- ["daisy1", "daisy2", "lumpy3", "lumpy4"], "")
+ ["daisy1", "daisy2", "lumpy3", "lumpy4"],
+ "", "", "")
class MachineManagerTest(unittest.TestCase):
diff --git a/crosperf/perf_table.py b/crosperf/perf_table.py
index b3387ea8..3c8b88b8 100644
--- a/crosperf/perf_table.py
+++ b/crosperf/perf_table.py
@@ -7,6 +7,7 @@ import os
from utils import perf_diff
+
def ParsePerfReport(perf_file):
"""It should return a dict."""
@@ -22,9 +23,16 @@ class PerfTable(object):
self._label_names = label_names
self.perf_data = {}
self.GenerateData()
- # {benchmark:{perf_event1:[[{func1:number, func2:number},
- # {func1: number, func2: number}]], ...},
+
+ # {benchmark:{perf_event1:[[{func1:number, func2:number,
+ # rows_to_show: number}
+ # {func1: number, func2: number
+ # rows_to_show: number}]], ...},
# benchmark2:...}
+ # The rows_to_show is temp data recording how many
+ # rows have over 1% running time.
+ self.row_info = {}
+ self.GetRowsToShow()
def GenerateData(self):
for label in self._label_names:
@@ -39,9 +47,10 @@ class PerfTable(object):
def ReadPerfReport(self, perf_file, label, benchmark_name, iteration):
"""Add the data from one run to the dict."""
- if not os.path.isfile(perf_file):
- return
- perf_of_run = perf_diff.GetPerfDictFromReport(perf_file)
+ if os.path.isfile(perf_file):
+ perf_of_run = perf_diff.GetPerfDictFromReport(perf_file)
+ else:
+ perf_of_run = {}
if benchmark_name not in self.perf_data:
self.perf_data[benchmark_name] = {}
for event in perf_of_run:
@@ -55,4 +64,22 @@ class PerfTable(object):
data_for_label = ben_data[event][label_index]
while len(data_for_label) <= iteration:
data_for_label.append({})
- data_for_label[iteration] = perf_of_run[event]
+ if perf_of_run:
+ data_for_label[iteration] = perf_of_run[event]
+ else:
+ data_for_label[iteration] = {}
+
+ def GetRowsToShow(self):
+ for benchmark in self.perf_data:
+ if benchmark not in self.row_info:
+ self.row_info[benchmark] = {}
+ for event in self.perf_data[benchmark]:
+ rows = 0
+ for run in self.perf_data[benchmark][event]:
+ for iteration in run:
+ if perf_diff.ROWS_TO_SHOW in iteration:
+ rows = max(iteration[perf_diff.ROWS_TO_SHOW], rows)
+ # delete the temp data which stores how many rows of
+ # the perf data have over 1% running time.
+ del iteration[perf_diff.ROWS_TO_SHOW]
+ self.row_info[benchmark][event] = rows
diff --git a/crosperf/results_cache.py b/crosperf/results_cache.py
index c0600962..0357275d 100644
--- a/crosperf/results_cache.py
+++ b/crosperf/results_cache.py
@@ -15,7 +15,8 @@ from utils import misc
from image_checksummer import ImageChecksummer
-SCRATCH_DIR = "/home/%s/cros_scratch" % getpass.getuser()
+SCRATCH_BASE = "/home/%s/cros_scratch"
+SCRATCH_DIR = SCRATCH_BASE % getpass.getuser()
RESULTS_FILE = "results.txt"
MACHINE_FILE = "machine.txt"
AUTOTEST_TARBALL = "autotest.tbz2"
@@ -55,16 +56,18 @@ class Result(object):
self._CopyFilesTo(dest_dir, self.perf_report_files)
def _GetKeyvals(self):
- generate_test_report = os.path.join(self._chromeos_root,
- "src",
- "platform",
- "crostestutils",
- "utils_py",
- "generate_test_report.py")
- command = ("python %s --no-color --csv %s" %
- (generate_test_report,
- self.results_dir))
- [_, out, _] = self._ce.RunCommand(command, return_output=True)
+ results_in_chroot = os.path.join(self._chromeos_root,
+ "chroot", "tmp")
+ if not self._temp_dir:
+ self._temp_dir = tempfile.mkdtemp(dir=results_in_chroot)
+ command = "cp -r {0}/* {1}".format(self.results_dir, self._temp_dir)
+ self._ce.RunCommand(command)
+
+ command = ("python generate_test_report --no-color --csv %s" %
+ (os.path.join("/tmp", os.path.basename(self._temp_dir))))
+ [_, out, _] = self._ce.ChrootRunCommand(self._chromeos_root,
+ command,
+ return_output=True)
keyvals_dict = {}
for line in out.splitlines():
tokens = re.split("=|,", line)
@@ -177,7 +180,9 @@ class Result(object):
self.retval = pickle.load(f)
# Untar the tarball to a temporary directory
- self._temp_dir = tempfile.mkdtemp()
+ self._temp_dir = tempfile.mkdtemp(dir=os.path.join(self._chromeos_root,
+ "chroot", "tmp"))
+
command = ("cd %s && tar xf %s" %
(self._temp_dir,
os.path.join(cache_dir, AUTOTEST_TARBALL)))
@@ -189,24 +194,25 @@ class Result(object):
self.perf_report_files = self._GetPerfReportFiles()
self._ProcessResults()
- def CleanUp(self):
+ def CleanUp(self, rm_chroot_tmp):
+ if rm_chroot_tmp:
+ command = "rm -rf %s" % self.results_dir
+ self._ce.RunCommand(command)
if self._temp_dir:
command = "rm -rf %s" % self._temp_dir
self._ce.RunCommand(command)
def StoreToCacheDir(self, cache_dir, machine_manager):
# Create the dir if it doesn't exist.
- command = "mkdir -p %s" % cache_dir
- ret = self._ce.RunCommand(command)
- if ret:
- raise Exception("Could not create cache dir: %s" % cache_dir)
- # Store to the cache directory.
- with open(os.path.join(cache_dir, RESULTS_FILE), "w") as f:
+ temp_dir = tempfile.mkdtemp()
+
+ # Store to the temp directory.
+ with open(os.path.join(temp_dir, RESULTS_FILE), "w") as f:
pickle.dump(self.out, f)
pickle.dump(self.err, f)
pickle.dump(self.retval, f)
- tarball = os.path.join(cache_dir, AUTOTEST_TARBALL)
+ tarball = os.path.join(temp_dir, AUTOTEST_TARBALL)
command = ("cd %s && "
"tar "
"--exclude=var/spool "
@@ -218,9 +224,22 @@ class Result(object):
# Store machine info.
# TODO(asharif): Make machine_manager a singleton, and don't pass it into
# this function.
- with open(os.path.join(cache_dir, MACHINE_FILE), "w") as f:
+ with open(os.path.join(temp_dir, MACHINE_FILE), "w") as f:
f.write(machine_manager.machine_checksum_string[self.label_name])
+ if os.path.exists(cache_dir):
+ command = "rm -rf {0}".format(cache_dir)
+ self._ce.RunCommand(command)
+
+ command = "mkdir -p {0} && ".format(os.path.dirname(cache_dir))
+ command += "mv {0} {1}".format(temp_dir, cache_dir)
+ ret = self._ce.RunCommand(command)
+ if ret:
+ command = "rm -rf {0}".format(temp_dir)
+ self._ce.RunCommand(command)
+ raise Exception("Could not move dir %s to dir %s" %
+ (temp_dir, cache_dir))
+
@classmethod
def CreateFromRun(cls, logger, chromeos_root, board, label_name,
out, err, retval):
@@ -273,7 +292,7 @@ class ResultsCache(object):
def Init(self, chromeos_image, chromeos_root, autotest_name, iteration,
autotest_args, machine_manager, board, cache_conditions,
- logger_to_use, label):
+ logger_to_use, label, share_users):
self.chromeos_image = chromeos_image
self.chromeos_root = chromeos_root
self.autotest_name = autotest_name
@@ -285,10 +304,12 @@ class ResultsCache(object):
self._logger = logger_to_use
self._ce = command_executer.GetCommandExecuter(self._logger)
self.label = label
+ self.share_users = share_users
def _GetCacheDirForRead(self):
- glob_path = self._FormCacheDir(self._GetCacheKeyList(True))
- matching_dirs = glob.glob(glob_path)
+ matching_dirs = []
+ for glob_path in self._FormCacheDir(self._GetCacheKeyList(True)):
+ matching_dirs += glob.glob(glob_path)
if matching_dirs:
# Cache file found.
@@ -297,12 +318,21 @@ class ResultsCache(object):
return None
def _GetCacheDirForWrite(self):
- return self._FormCacheDir(self._GetCacheKeyList(False))
+ return self._FormCacheDir(self._GetCacheKeyList(False))[0]
def _FormCacheDir(self, list_of_strings):
cache_key = " ".join(list_of_strings)
cache_dir = misc.GetFilenameFromString(cache_key)
- cache_path = os.path.join(SCRATCH_DIR, cache_dir)
+ if self.label.cache_dir:
+ cache_home = os.path.abspath(os.path.expanduser(self.label.cache_dir))
+ cache_path = [os.path.join(cache_home, cache_dir)]
+ else:
+ cache_path = [os.path.join(SCRATCH_DIR, cache_dir)]
+
+ for i in [x.strip() for x in self.share_users.split(",")]:
+ path = SCRATCH_BASE % i
+ cache_path.append(os.path.join(path, cache_dir))
+
return cache_path
def _GetCacheKeyList(self, read):
@@ -313,7 +343,7 @@ class ResultsCache(object):
if read and CacheConditions.CHECKSUMS_MATCH not in self.cache_conditions:
checksum = "*"
else:
- checksum = ImageChecksummer().Checksum(self.chromeos_image)
+ checksum = ImageChecksummer().Checksum(self.label)
if read and CacheConditions.IMAGE_PATH_MATCH not in self.cache_conditions:
image_path_checksum = "*"
diff --git a/crosperf/results_organizer.py b/crosperf/results_organizer.py
index 810186b2..2e5c9296 100644
--- a/crosperf/results_organizer.py
+++ b/crosperf/results_organizer.py
@@ -19,6 +19,11 @@ class ResultOrganizer(object):
[
]}.
"""
+ key_filter = ["milliseconds_",
+ "retval",
+ "iterations",
+ "ms_",
+ "score_"]
def __init__(self, benchmark_runs, labels, benchmarks=None):
self.result = {}
@@ -43,7 +48,15 @@ class ResultOrganizer(object):
cur_dict = cur_table[index]
if not benchmark_run.result:
continue
+ benchmark = benchmark_run.benchmark
+ key_filter_on = (benchmark.key_results_only and
+ "PyAutoPerfTest" in benchmark.name + benchmark.autotest_name and
+ "perf." not in benchmark.autotest_args)
for autotest_key in benchmark_run.result.keyvals:
+ if (key_filter_on and
+ not any([key for key in self.key_filter if key in autotest_key])
+ ):
+ continue
result_value = benchmark_run.result.keyvals[autotest_key]
cur_dict[autotest_key] = result_value
self._DuplicatePass()
diff --git a/crosperf/results_report.py b/crosperf/results_report.py
index f7434132..61c67d5b 100644
--- a/crosperf/results_report.py
+++ b/crosperf/results_report.py
@@ -11,6 +11,7 @@ from perf_table import PerfTable
class ResultsReport(object):
MAX_COLOR_CODE = 255
+ PERF_ROWS = 5
def __init__(self, experiment):
self.experiment = experiment
@@ -40,7 +41,13 @@ class ResultsReport(object):
Column(AmeanResult(),
Format()),
Column(StdResult(),
- Format())
+ Format(), "StdDev"),
+ Column(CoeffVarResult(),
+ CoeffVarFormat(), "StdDev/Mean"),
+ Column(GmeanRatioResult(),
+ RatioFormat(), "GmeanSpeedup"),
+ Column(PValueResult(),
+ PValueFormat(), "p-value")
]
if not perf:
return self._GetTables(self.labels, self.benchmark_runs, columns)
@@ -138,10 +145,12 @@ class ResultsReport(object):
ben_table = self._GetTableHeader(ben)
tables.append(ben_table)
benchmark_data = p_table.perf_data[benchmark]
+ row_info = p_table.row_info[benchmark]
table = []
for event in benchmark_data:
- tg = TableGenerator(benchmark_data[event], label_names)
- table = tg.GetTable()
+ tg = TableGenerator(benchmark_data[event], label_names,
+ sort=TableGenerator.SORT_BY_VALUES_DESC)
+ table = tg.GetTable(max(self.PERF_ROWS, row_info[event]))
parsed_columns = self._ParseColumn(columns, ben.iterations)
tf = TableFormatter(table, parsed_columns)
tf.GenerateCellTable()
diff --git a/crosperf/settings_factory.py b/crosperf/settings_factory.py
index 11fa4b4b..924bc114 100644
--- a/crosperf/settings_factory.py
+++ b/crosperf/settings_factory.py
@@ -26,6 +26,12 @@ class BenchmarkSettings(Settings):
self.AddField(FloatField("outlier_range", default=0.2,
description="The percentage of highest/lowest "
"values to omit when computing the average."))
+ self.AddField(BooleanField("rm_chroot_tmp", default=False,
+ description="Whether to remove the run_remote_test "
+ "result in the chroot"))
+ self.AddField(BooleanField("key_results_only", default=True,
+ description="Whether to show only the key results "
+ "of pyautoperf"))
self.AddField(TextField("perf_args", default="",
description="The optional profile command. It "
"enables perf commands to record performance "
@@ -44,6 +50,8 @@ class LabelSettings(Settings):
"contains a src/scripts directory. Defaults to "
"the chromeos checkout which contains the "
"chromeos_image."))
+ self.AddField(TextField("md5sum", default="",
+ description="The md5sum of this image"))
self.AddField(TextField("board", required=True, description="The target "
"board for running experiments on, e.g. x86-alex."))
self.AddField(ListField("remote", description=
@@ -53,6 +61,8 @@ class LabelSettings(Settings):
default="",
description="Extra arguments to pass to "
"image_chromeos.py."))
+ self.AddField(TextField("cache_dir", default="",
+ description="The cache dir for this image."))
class GlobalSettings(Settings):
@@ -69,6 +79,9 @@ class GlobalSettings(Settings):
self.AddField(BooleanField("rerun_if_failed", description="Whether to "
"re-run failed autotest runs or not.",
default=False))
+ self.AddField(BooleanField("rm_chroot_tmp", default=False,
+ description="Whether to remove the run_remote_test "
+ "result in the chroot"))
self.AddField(ListField("email", description="Space-separated "
"list of email addresses to send email to."))
self.AddField(BooleanField("rerun", description="Whether to ignore the "
@@ -89,11 +102,27 @@ class GlobalSettings(Settings):
"contains a src/scripts directory. Defaults to "
"the chromeos checkout which contains the "
"chromeos_image."))
+ self.AddField(BooleanField("key_results_only", default=True,
+ description="Whether to show only the key results "
+ "of pyautoperf"))
+ self.AddField(IntegerField("acquire_timeout", default=0,
+ description="Number of seconds to wait for "
+ "machine before exit if all the machines in "
+ "the experiment file are busy. Default is 0"))
self.AddField(TextField("perf_args", default="",
description="The optional profile command. It "
"enables perf commands to record performance "
"related counters. It must start with perf "
"command record or stat followed by arguments."))
+ self.AddField(TextField("cache_dir", default="",
+ description="The abs path of cache dir. "
+ "Default is /home/$(whoami)/cros_scratch."))
+ self.AddField(BooleanField("no_email", default=False,
+ description="Whether to disable the email to "
+ "user after crosperf finishes."))
+ self.AddField(TextField("share_users", default="",
+ description="Whose cache data you want to "
+ "use. It accepts multiple users separated by \",\""))
class SettingsFactory(object):