author    Ahmad Sharif <asharif@chromium.org>  2012-10-09 17:48:09 -0700
committer Ahmad Sharif <asharif@chromium.org>  2012-10-09 17:48:09 -0700
commit    f395c26437cbdabc2960447fba89b226f4409e82 (patch)
tree      c67df25ac39b2c2b3d6d9aa847391edebbb0c478 /crosperf
parent    a171f8d10c0507127de54c60076f1f59feef3629 (diff)
download  toolchain-utils-f395c26437cbdabc2960447fba89b226f4409e82.tar.gz
Synced repos to: 63271
Diffstat (limited to 'crosperf')
-rw-r--r--  crosperf/autotest_runner.py                  |   4
-rw-r--r--  crosperf/benchmark.py                        |   5
-rw-r--r--  crosperf/benchmark_run.py                    |  31
-rw-r--r--  crosperf/experiment.py                       |   8
-rw-r--r--  crosperf/experiment_factory.py               |  13
-rw-r--r--  crosperf/experiment_file.py                  |   6
-rw-r--r--  crosperf/experiment_files/aes_perf           |   3
-rw-r--r--  crosperf/experiment_files/bloat_perf         |   3
-rw-r--r--  crosperf/experiment_files/morejs_perf        |   3
-rw-r--r--  crosperf/experiment_files/page_cycler_perf   |   3
-rw-r--r--  crosperf/experiment_files/toolchain          |   5
-rw-r--r--  crosperf/experiment_runner.py                |  16
-rw-r--r--  crosperf/machine_manager.py                  | 121
-rw-r--r--  crosperf/results_cache.py                    |  43
-rw-r--r--  crosperf/results_columns.py                  | 152
-rw-r--r--  crosperf/results_organizer.py                |  42
-rw-r--r--  crosperf/results_report.py                   | 292
-rw-r--r--  crosperf/results_sorter.py                   |   4
-rw-r--r--  crosperf/settings_factory.py                 |  31
-rw-r--r--  crosperf/table.py                            |  84
20 files changed, 408 insertions, 461 deletions
diff --git a/crosperf/autotest_runner.py b/crosperf/autotest_runner.py
index 80fb2a24..5611b655 100644
--- a/crosperf/autotest_runner.py
+++ b/crosperf/autotest_runner.py
@@ -15,11 +15,15 @@ class AutotestRunner(object):
def Run(self, machine_name, chromeos_root, board, autotest_name,
autotest_args):
+    """Run run_remote_tests.sh on the remote machine."""
options = ""
if board:
options += " --board=%s" % board
if autotest_args:
options += " %s" % autotest_args
+ command = "rm -rf /usr/local/autotest/results/*"
+ self._ce.CrosRunCommand(command, machine=machine_name, username="root",
+ chromeos_root=chromeos_root)
command = ("./run_remote_tests.sh --remote=%s %s %s" %
(machine_name, options, autotest_name))
return self._ce.ChrootRunCommand(chromeos_root, command, True, self._ct)
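
The added lines wipe stale results on the device before run_remote_tests.sh runs, so leftovers from a previous run cannot end up in the cached output. A minimal sketch of the same command construction; build_autotest_commands is a hypothetical helper, not the real AutotestRunner code:

    # Sketch only: mirrors the ordering above, not the exact AutotestRunner method.
    def build_autotest_commands(machine_name, board, autotest_name, autotest_args=""):
        options = ""
        if board:
            options += " --board=%s" % board
        if autotest_args:
            options += " %s" % autotest_args
        # Wipe stale results on the device (run as root on the DUT).
        cleanup = "rm -rf /usr/local/autotest/results/*"
        # Run the test from inside the chroot.
        run = ("./run_remote_tests.sh --remote=%s %s %s"
               % (machine_name, options, autotest_name))
        return cleanup, run
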
diff --git a/crosperf/benchmark.py b/crosperf/benchmark.py
index fa12d934..a75bd8e3 100644
--- a/crosperf/benchmark.py
+++ b/crosperf/benchmark.py
@@ -14,11 +14,10 @@ class Benchmark(object):
"""
def __init__(self, name, autotest_name, autotest_args, iterations,
- outlier_range, profile_counters, profile_type):
+ outlier_range, perf_args):
self.name = name
self.autotest_name = autotest_name
self.autotest_args = autotest_args
self.iterations = iterations
self.outlier_range = outlier_range
- self.profile_counters = profile_counters
- self.profile_type = profile_type
+ self.perf_args = perf_args
diff --git a/crosperf/benchmark_run.py b/crosperf/benchmark_run.py
index c20b24e0..7579b6c2 100644
--- a/crosperf/benchmark_run.py
+++ b/crosperf/benchmark_run.py
@@ -8,12 +8,12 @@ import re
import threading
import time
import traceback
-from results_cache import Result
-from utils import logger
-from utils import command_executer
+
from autotest_runner import AutotestRunner
+from results_cache import Result
from results_cache import ResultsCache
-
+from utils import command_executer
+from utils import logger
STATUS_FAILED = "FAILED"
STATUS_SUCCEEDED = "SUCCEEDED"
@@ -26,7 +26,7 @@ STATUS_PENDING = "PENDING"
class BenchmarkRun(threading.Thread):
def __init__(self, name, benchmark_name, autotest_name, autotest_args,
label_name, chromeos_root, chromeos_image, board, iteration,
- cache_conditions, outlier_range, profile_counters, profile_type,
+ cache_conditions, outlier_range, perf_args,
machine_manager,
logger_to_use):
threading.Thread.__init__(self)
@@ -45,8 +45,7 @@ class BenchmarkRun(threading.Thread):
self.status = STATUS_PENDING
self.run_completed = False
self.outlier_range = outlier_range
- self.profile_counters = profile_counters
- self.profile_type = profile_type
+ self.perf_args = perf_args
self.machine_manager = machine_manager
self.cache = ResultsCache()
self.autotest_runner = AutotestRunner(self._logger)
@@ -68,10 +67,11 @@ class BenchmarkRun(threading.Thread):
self.autotest_name,
self.iteration,
self.autotest_args,
- self.machine_manager.GetMachines()[0].name,
+ self.machine_manager,
self.board,
self.cache_conditions,
- self._logger)
+ self._logger,
+ )
self.result = self.cache.ReadResult()
self.cache_hit = (self.result is not None)
@@ -134,13 +134,12 @@ class BenchmarkRun(threading.Thread):
return machine
def _GetExtraAutotestArgs(self):
- if self.profile_type:
- if self.profile_type == "record":
- perf_args = "record -a -e %s" % ",".join(self.profile_counters)
- elif self.profile_type == "stat":
- perf_args = "stat -a"
- else:
- raise Exception("profile_type must be either record or stat")
+ if self.perf_args:
+ perf_args_list = self.perf_args.split(" ")
+ perf_args_list = [perf_args_list[0]] + ["-a"] + perf_args_list[1:]
+ perf_args = " ".join(perf_args_list)
+ if not perf_args_list[0] in ["record", "stat"]:
+ raise Exception("perf_args must start with either record or stat")
extra_autotest_args = ["--profiler=custom_perf",
("--profiler_args='perf_options=\"%s\"'" %
perf_args)]
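
With profile_type/profile_counters collapsed into a single perf_args string, _GetExtraAutotestArgs now splices "-a" (system-wide profiling) in after the first token and rejects anything that does not start with record or stat. A standalone sketch of that transformation, assuming the same validation rule; build_profiler_args is illustrative, not the crosperf method:

    # Sketch of the perf_args handling above (not the exact BenchmarkRun code).
    def build_profiler_args(perf_args):
        if not perf_args:
            return []
        tokens = perf_args.split(" ")
        if tokens[0] not in ("record", "stat"):
            raise Exception("perf_args must start with either record or stat")
        # Insert -a so perf profiles the whole system, as the old code did.
        perf_options = " ".join([tokens[0]] + ["-a"] + tokens[1:])
        return ["--profiler=custom_perf",
                "--profiler_args='perf_options=\"%s\"'" % perf_options]

    # build_profiler_args("record -e cycles") ->
    # ["--profiler=custom_perf",
    #  "--profiler_args='perf_options=\"record -a -e cycles\"'"]
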
diff --git a/crosperf/experiment.py b/crosperf/experiment.py
index 3f68f8be..7b48344c 100644
--- a/crosperf/experiment.py
+++ b/crosperf/experiment.py
@@ -18,7 +18,7 @@ class Experiment(object):
def __init__(self, name, remote, rerun_if_failed, working_directory,
chromeos_root, cache_conditions, labels, benchmarks,
- experiment_file):
+ experiment_file, email_to):
self.name = name
self.rerun_if_failed = rerun_if_failed
self.working_directory = working_directory
@@ -26,6 +26,7 @@ class Experiment(object):
self.chromeos_root = chromeos_root
self.cache_conditions = cache_conditions
self.experiment_file = experiment_file
+ self.email_to = email_to
self.results_directory = os.path.join(self.working_directory,
self.name + "_results")
@@ -48,6 +49,8 @@ class Experiment(object):
for machine in remote:
self.machine_manager.AddMachine(machine)
+ self.machine_manager.ComputeCommonCheckSum()
+ self.machine_manager.ComputeCommonCheckSumString()
self.start_time = None
self.benchmark_runs = self._GenerateBenchmarkRuns()
@@ -76,8 +79,7 @@ class Experiment(object):
iteration,
self.cache_conditions,
benchmark.outlier_range,
- benchmark.profile_counters,
- benchmark.profile_type,
+ benchmark.perf_args,
self.machine_manager,
logger_to_use)
diff --git a/crosperf/experiment_factory.py b/crosperf/experiment_factory.py
index d3c717ae..5c21179e 100644
--- a/crosperf/experiment_factory.py
+++ b/crosperf/experiment_factory.py
@@ -33,7 +33,7 @@ class ExperimentFactory(object):
if global_settings.GetField("rerun"):
cache_conditions.append(CacheConditions.FALSE)
if global_settings.GetField("exact_remote"):
- cache_conditions.append(CacheConditions.REMOTES_MATCH)
+ cache_conditions.append(CacheConditions.MACHINES_MATCH)
# Construct benchmarks.
benchmarks = []
@@ -46,11 +46,9 @@ class ExperimentFactory(object):
autotest_args = benchmark_settings.GetField("autotest_args")
iterations = benchmark_settings.GetField("iterations")
outlier_range = benchmark_settings.GetField("outlier_range")
- profile_counters = benchmark_settings.GetField("profile_counters")
- profile_type = benchmark_settings.GetField("profile_type")
+ perf_args = benchmark_settings.GetField("perf_args")
benchmark = Benchmark(benchmark_name, autotest_name, autotest_args,
- iterations, outlier_range, profile_counters,
- profile_type)
+ iterations, outlier_range, perf_args)
benchmarks.append(benchmark)
# Construct labels.
@@ -64,9 +62,12 @@ class ExperimentFactory(object):
label = Label(label_name, image, chromeos_root, board)
labels.append(label)
+ email = global_settings.GetField("email")
+
experiment = Experiment(experiment_name, remote, rerun_if_failed,
working_directory, chromeos_root,
cache_conditions, labels, benchmarks,
- experiment_file.Canonicalize())
+ experiment_file.Canonicalize(),
+ email)
return experiment
diff --git a/crosperf/experiment_file.py b/crosperf/experiment_file.py
index bde2a4d7..fc0f16ab 100644
--- a/crosperf/experiment_file.py
+++ b/crosperf/experiment_file.py
@@ -4,6 +4,7 @@
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
+import os.path
import re
from settings import Settings
from settings_factory import SettingsFactory
@@ -143,6 +144,11 @@ class ExperimentFile(object):
field = settings.fields[field_name]
if field.assigned:
res += "\t%s: %s\n" % (field.name, field.GetString())
+ if field.name == "chromeos_image":
+ real_file = (os.path.realpath
+ (os.path.expanduser(field.GetString())))
+ if real_file != field.GetString():
+ res += "\t#actual_image: %s\n" % real_file
res += "}\n\n"
return res
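
Canonicalize() now records the fully resolved image path as a comment whenever chromeos_image goes through a symlink or "~", so a canonicalized experiment file identifies the concrete image that was tested. A minimal sketch of that resolution step; annotate_image_field is a hypothetical helper operating on the raw field string:

    import os.path

    # Sketch of the #actual_image annotation added above.
    def annotate_image_field(path_value):
        real_file = os.path.realpath(os.path.expanduser(path_value))
        text = "\tchromeos_image: %s\n" % path_value
        if real_file != path_value:
            text += "\t#actual_image: %s\n" % real_file
        return text
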
diff --git a/crosperf/experiment_files/aes_perf b/crosperf/experiment_files/aes_perf
index b298e91c..0c54ccbd 100644
--- a/crosperf/experiment_files/aes_perf
+++ b/crosperf/experiment_files/aes_perf
@@ -1,8 +1,7 @@
# This experiment just runs a short autotest which measures the performance of
# aes encryption. In addition, it profiles
-profile_type: record
-profile_counters: instructions cycles
+perf_args: record -e cycles -e instructions
benchmark: platform_AesThroughput {
}
diff --git a/crosperf/experiment_files/bloat_perf b/crosperf/experiment_files/bloat_perf
index a95d2cbf..f8258ee1 100644
--- a/crosperf/experiment_files/bloat_perf
+++ b/crosperf/experiment_files/bloat_perf
@@ -1,5 +1,4 @@
-profile_type: record
-profile_counters: cycles instructions
+perf_args: record -e cycles
benchmark: bloat {
autotest_name: desktopui_PyAutoPerfTests
diff --git a/crosperf/experiment_files/morejs_perf b/crosperf/experiment_files/morejs_perf
index d7ab45bb..a02f15f5 100644
--- a/crosperf/experiment_files/morejs_perf
+++ b/crosperf/experiment_files/morejs_perf
@@ -1,5 +1,4 @@
-profile_type: record
-profile_counters: cycles instructions
+perf_args: record -e cycles
benchmark: morejs {
autotest_name: desktopui_PyAutoPerfTests
diff --git a/crosperf/experiment_files/page_cycler_perf b/crosperf/experiment_files/page_cycler_perf
index 866fb751..7f5e7118 100644
--- a/crosperf/experiment_files/page_cycler_perf
+++ b/crosperf/experiment_files/page_cycler_perf
@@ -1,7 +1,6 @@
# This experiment profiles all page cyclers.
-profile_type: record
-profile_counters: cycles
+perf_args: record -e cycles
benchmark: morejs {
autotest_name: desktopui_PyAutoPerfTests
diff --git a/crosperf/experiment_files/toolchain b/crosperf/experiment_files/toolchain
index c6790505..9156998b 100644
--- a/crosperf/experiment_files/toolchain
+++ b/crosperf/experiment_files/toolchain
@@ -5,11 +5,12 @@ benchmark: bvt {
}
benchmark: suite_Smoke {
- auotest_name: suite:smoke
+ autotest_name: suite:smoke
}
benchmark: PyAutoPerfTests {
}
-benchmark: AndroidBench {
+benchmark: BootPerfServer {
+ autotest_name: ^server/site_tests/platform_BootPerfServer/control$
}
diff --git a/crosperf/experiment_runner.py b/crosperf/experiment_runner.py
index c5cd1ada..4219c435 100644
--- a/crosperf/experiment_runner.py
+++ b/crosperf/experiment_runner.py
@@ -53,7 +53,7 @@ class ExperimentRunner(object):
if not benchmark_run.cache_hit:
send_mail = True
break
- if not send_mail:
+ if not send_mail and not experiment.email_to:
return
label_names = []
@@ -61,11 +61,12 @@ class ExperimentRunner(object):
label_names.append(label.name)
subject = "%s: %s" % (experiment.name, " vs. ".join(label_names))
- text_report = TextResultsReport(experiment).GetReport()
+ text_report = TextResultsReport(experiment, True).GetReport()
text_report = "<pre style='font-size: 13px'>%s</pre>" % text_report
html_report = HTMLResultsReport(experiment).GetReport()
attachment = EmailSender.Attachment("report.html", html_report)
- EmailSender().SendEmail([getpass.getuser()],
+ email_to = [getpass.getuser()] + experiment.email_to
+ EmailSender().SendEmail(email_to,
subject,
text_report,
attachments=[attachment],
@@ -89,10 +90,11 @@ class ExperimentRunner(object):
self.l.LogOutput("Storing results of each benchmark run.")
for benchmark_run in experiment.benchmark_runs:
- benchmark_run_name = filter(str.isalnum, benchmark_run.name)
- benchmark_run_path = os.path.join(results_directory,
- benchmark_run_name)
- benchmark_run.result.CopyResultsTo(benchmark_run_path)
+ if benchmark_run.result:
+ benchmark_run_name = filter(str.isalnum, benchmark_run.name)
+ benchmark_run_path = os.path.join(results_directory,
+ benchmark_run_name)
+ benchmark_run.result.CopyResultsTo(benchmark_run_path)
def Run(self):
self._Run(self._experiment)
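
With the new email_to list threaded through from the experiment file, the report goes to the invoking user plus those addresses, and is sent even on an all-cache-hit run whenever email_to is non-empty. A small sketch of the recipient logic; report_recipients is illustrative and assumes run objects expose cache_hit:

    import getpass

    # Sketch of the recipient selection above; not the ExperimentRunner code.
    def report_recipients(benchmark_runs, email_to):
        any_new_results = any(not r.cache_hit for r in benchmark_runs)
        if not any_new_results and not email_to:
            return []   # nothing new to report and nobody asked for mail
        return [getpass.getuser()] + list(email_to)
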
diff --git a/crosperf/machine_manager.py b/crosperf/machine_manager.py
index eb0b7539..8562e929 100644
--- a/crosperf/machine_manager.py
+++ b/crosperf/machine_manager.py
@@ -1,9 +1,12 @@
+import hashlib
+import image_chromeos
+import lock_machine
+import math
+import os.path
import sys
import threading
import time
from image_checksummer import ImageChecksummer
-import image_chromeos
-import lock_machine
from utils import command_executer
from utils import logger
from utils.file_utils import FileUtils
@@ -12,13 +15,90 @@ CHECKSUM_FILE = "/usr/local/osimage_checksum_file"
class CrosMachine(object):
- def __init__(self, name):
+ def __init__(self, name, chromeos_root):
self.name = name
self.image = None
self.checksum = None
self.locked = False
self.released_time = time.time()
self.autotest_run = None
+ self.chromeos_root = chromeos_root
+ self._GetMemoryInfo()
+ self._GetCPUInfo()
+ self._ComputeMachineChecksumString()
+ self._ComputeMachineChecksum()
+
+ def _ParseMemoryInfo(self):
+ line = self.meminfo.splitlines()[0]
+ usable_kbytes = int(line.split()[1])
+ # This code is from src/third_party/autotest/files/client/bin/base_utils.py
+ # usable_kbytes is system's usable DRAM in kbytes,
+ # as reported by memtotal() from device /proc/meminfo memtotal
+ # after Linux deducts 1.5% to 9.5% for system table overhead
+ # Undo the unknown actual deduction by rounding up
+ # to next small multiple of a big power-of-two
+ # eg 12GB - 5.1% gets rounded back up to 12GB
+ mindeduct = 0.005 # 0.5 percent
+ maxdeduct = 0.095 # 9.5 percent
+ # deduction range 1.5% .. 9.5% supports physical mem sizes
+ # 6GB .. 12GB in steps of .5GB
+ # 12GB .. 24GB in steps of 1 GB
+ # 24GB .. 48GB in steps of 2 GB ...
+ # Finer granularity in physical mem sizes would require
+ # tighter spread between min and max possible deductions
+
+ # increase mem size by at least min deduction, without rounding
+ min_kbytes = int(usable_kbytes / (1.0 - mindeduct))
+ # increase mem size further by 2**n rounding, by 0..roundKb or more
+ round_kbytes = int(usable_kbytes / (1.0 - maxdeduct)) - min_kbytes
+ # find least binary roundup 2**n that covers worst-cast roundKb
+ mod2n = 1 << int(math.ceil(math.log(round_kbytes, 2)))
+ # have round_kbytes <= mod2n < round_kbytes*2
+ # round min_kbytes up to next multiple of mod2n
+ phys_kbytes = min_kbytes + mod2n - 1
+ phys_kbytes -= phys_kbytes % mod2n # clear low bits
+ self.phys_kbytes = phys_kbytes
+
+ def _GetMemoryInfo(self):
+    #TODO yunlian: when the machine is rebooting, it will not return
+    #meminfo, and the assert does not catch it either
+ ce = command_executer.GetCommandExecuter()
+ command = "cat /proc/meminfo"
+ ret, self.meminfo, _ = ce.CrosRunCommand(
+ command, return_output=True,
+ machine=self.name, username="root", chromeos_root=self.chromeos_root)
+ assert ret == 0, "Could not get meminfo from machine: %s" % self.name
+ if ret == 0:
+ self._ParseMemoryInfo()
+
+ #cpuinfo format is different across architecture
+ #need to find a better way to parse it.
+ def _ParseCPUInfo(self,cpuinfo):
+ return 0
+
+ def _GetCPUInfo(self):
+ ce = command_executer.GetCommandExecuter()
+ command = "cat /proc/cpuinfo"
+ ret, self.cpuinfo, _ = ce.CrosRunCommand(
+ command, return_output=True,
+ machine=self.name, username="root", chromeos_root=self.chromeos_root)
+ assert ret == 0, "Could not get cpuinfo from machine: %s" % self.name
+ if ret == 0:
+ self._ParseCPUInfo(self.cpuinfo)
+
+ def _ComputeMachineChecksumString(self):
+ self.checksum_string = ""
+ exclude_lines_list = ["MHz", "BogoMIPS", "bogomips"]
+ for line in self.cpuinfo.splitlines():
+ if not any([e in line for e in exclude_lines_list]):
+ self.checksum_string += line
+ self.checksum_string += " " + str(self.phys_kbytes)
+
+ def _ComputeMachineChecksum(self):
+ if self.checksum_string:
+ self.machine_checksum = hashlib.md5(self.checksum_string).hexdigest()
+ else:
+ self.machine_checksum = ""
def __str__(self):
l = []
@@ -38,7 +118,10 @@ class MachineManager(object):
self.image_lock = threading.Lock()
self.num_reimages = 0
self.chromeos_root = None
- self.no_lock = False
+ if os.path.isdir(lock_machine.FileLock.LOCKS_DIR):
+ self.no_lock = False
+ else:
+ self.no_lock = True
self.initialized = False
self.chromeos_root = chromeos_root
@@ -69,6 +152,20 @@ class MachineManager(object):
return retval
+ def ComputeCommonCheckSum(self):
+ self.machine_checksum = ""
+ for machine in self.GetMachines():
+ if machine.machine_checksum:
+ self.machine_checksum = machine.machine_checksum
+ break
+
+ def ComputeCommonCheckSumString(self):
+ self.machine_checksum_string = ""
+ for machine in self.GetMachines():
+ if machine.checksum_string:
+ self.machine_checksum_string = machine.checksum_string
+ break
+
def _TryToLockMachine(self, cros_machine):
with self._lock:
assert cros_machine, "Machine can't be None"
@@ -96,7 +193,14 @@ class MachineManager(object):
with self._lock:
for m in self._all_machines:
assert m.name != machine_name, "Tried to double-add %s" % machine_name
- self._all_machines.append(CrosMachine(machine_name))
+ cm = CrosMachine(machine_name, self.chromeos_root)
+ assert cm.machine_checksum, ("Could not find checksum for machine %s" %
+ machine_name)
+ self._all_machines.append(cm)
+
+ def AreAllMachineSame(self):
+ checksums = [m.machine_checksum for m in self.GetMachines()]
+ return len(set(checksums)) == 1
def AcquireMachine(self, chromeos_image):
image_checksum = ImageChecksummer().Checksum(chromeos_image)
@@ -109,12 +213,15 @@ class MachineManager(object):
for m in self._all_machines:
m.released_time = time.time()
+ if not self.AreAllMachineSame():
+        logger.GetLogger().LogFatal("-- not all the machines are identical")
if not self._machines:
machine_names = []
for machine in self._all_machines:
machine_names.append(machine.name)
- raise Exception("Could not acquire any of the following machines: '%s'"
- % ", ".join(machine_names))
+      logger.GetLogger().LogFatal("Could not acquire any of the "
+ "following machines: '%s'"
+ % ", ".join(machine_names))
### for m in self._machines:
### if (m.locked and time.time() - m.released_time < 10 and
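
CrosMachine now fingerprints each device by hashing /proc/cpuinfo (minus frequency-dependent lines such as MHz and BogoMIPS) together with a rounded physical-memory size, and AcquireMachine aborts unless every machine shares that checksum. The memory rounding is adapted from autotest's base_utils: it undoes the kernel's 0.5%-9.5% MemTotal deduction by rounding up to a power-of-two-aligned size. A standalone sketch of that calculation:

    import math

    # Standalone sketch of the DRAM-size rounding used in _ParseMemoryInfo above.
    def physical_kbytes_from_memtotal(usable_kbytes):
        mindeduct = 0.005   # 0.5 percent
        maxdeduct = 0.095   # 9.5 percent
        # Increase mem size by at least the minimum deduction, without rounding.
        min_kbytes = int(usable_kbytes / (1.0 - mindeduct))
        # Increase further by 2**n rounding, by 0..round_kbytes or more.
        round_kbytes = int(usable_kbytes / (1.0 - maxdeduct)) - min_kbytes
        # Least binary roundup 2**n that covers the worst-case round_kbytes.
        mod2n = 1 << int(math.ceil(math.log(round_kbytes, 2)))
        # Round min_kbytes up to the next multiple of mod2n.
        phys_kbytes = min_kbytes + mod2n - 1
        phys_kbytes -= phys_kbytes % mod2n   # clear low bits
        return phys_kbytes

    # Example: a /proc/meminfo MemTotal of ~3.8 GB rounds back up to 4 GB:
    # physical_kbytes_from_memtotal(3900000) == 4194304
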
diff --git a/crosperf/results_cache.py b/crosperf/results_cache.py
index 6fdca550..1c33e720 100644
--- a/crosperf/results_cache.py
+++ b/crosperf/results_cache.py
@@ -10,14 +10,15 @@ import pickle
import re
import tempfile
-from image_checksummer import ImageChecksummer
from utils import command_executer
from utils import logger
from utils import misc
+from image_checksummer import ImageChecksummer
SCRATCH_DIR = "/home/%s/cros_scratch" % getpass.getuser()
RESULTS_FILE = "results.txt"
+MACHINE_FILE = "machine.txt"
AUTOTEST_TARBALL = "autotest.tbz2"
PERF_RESULTS_FILE = "perf-results.txt"
@@ -65,7 +66,7 @@ class Result(object):
[ret, out, err] = self._ce.RunCommand(command, return_output=True)
keyvals_dict = {}
for line in out.splitlines():
- tokens = line.split(",")
+ tokens = re.split("=|,", line)
key = tokens[-2]
if key.startswith(self.results_dir):
key = key[len(self.results_dir) + 1:]
@@ -109,6 +110,7 @@ class Result(object):
chroot_perf_report_file = misc.GetInsideChrootPath(self._chromeos_root,
perf_report_file)
command = ("/usr/sbin/perf report "
+ "-n "
"--symfs /build/%s "
"--vmlinux /build/%s/usr/lib/debug/boot/vmlinux "
"--kallsyms /build/%s/boot/System.map-* "
@@ -186,6 +188,7 @@ class Result(object):
self.perf_data_files = self._GetPerfDataFiles()
self.perf_report_files = self._GetPerfReportFiles()
self._ProcessResults()
+ self.CleanUp()
def CleanUp(self):
if self._temp_dir:
@@ -193,7 +196,7 @@ class Result(object):
self._ce.RunCommand(command)
- def StoreToCacheDir(self, cache_dir):
+ def StoreToCacheDir(self, cache_dir, machine_manager):
# Create the dir if it doesn't exist.
command = "mkdir -p %s" % cache_dir
ret = self._ce.RunCommand(command)
@@ -206,10 +209,19 @@ class Result(object):
pickle.dump(self.retval, f)
tarball = os.path.join(cache_dir, AUTOTEST_TARBALL)
- command = ("cd %s && tar cjf %s ." % (self.results_dir, tarball))
+ command = ("cd %s && "
+ "tar "
+ "--exclude=var/spool "
+ "--exclude=var/log "
+ "-cjf %s ." % (self.results_dir, tarball))
ret = self._ce.RunCommand(command)
if ret:
raise Exception("Couldn't store autotest output directory.")
+ # Store machine info.
+ # TODO(asharif): Make machine_manager a singleton, and don't pass it into
+ # this function.
+ with open(os.path.join(cache_dir, MACHINE_FILE), "w") as f:
+ f.write(machine_manager.machine_checksum_string)
@classmethod
def CreateFromRun(cls, logger, chromeos_root, board, out, err, retval):
@@ -232,8 +244,9 @@ class CacheConditions(object):
# Cache hit only if the result file exists.
CACHE_FILE_EXISTS = 0
- # Cache hit if the ip address of the cached result and the new run match.
- REMOTES_MATCH = 1
+ # Cache hit if the checksum of cpuinfo and totalmem of
+ # the cached result and the new run match.
+ MACHINES_MATCH = 1
# Cache hit if the image checksum of the cached result and the new run match.
CHECKSUMS_MATCH = 2
@@ -253,18 +266,19 @@ class ResultsCache(object):
is exactly stored (value). The value generation is handled by the Results
class.
"""
- CACHE_VERSION = 3
+ CACHE_VERSION = 5
+
def Init(self, chromeos_image, chromeos_root, autotest_name, iteration,
- autotest_args, remote, board, cache_conditions,
+ autotest_args, machine_manager, board, cache_conditions,
logger_to_use):
self.chromeos_image = chromeos_image
self.chromeos_root = chromeos_root
self.autotest_name = autotest_name
self.iteration = iteration
self.autotest_args = autotest_args,
- self.remote = remote
self.board = board
self.cache_conditions = cache_conditions
+ self.machine_manager = machine_manager
self._logger = logger_to_use
self._ce = command_executer.GetCommandExecuter(self._logger)
@@ -291,10 +305,10 @@ class ResultsCache(object):
return cache_path
def _GetCacheKeyList(self, read):
- if read and CacheConditions.REMOTES_MATCH not in self.cache_conditions:
- remote = "*"
+ if read and CacheConditions.MACHINES_MATCH not in self.cache_conditions:
+ machine_checksum = "*"
else:
- remote = self.remote
+ machine_checksum = self.machine_manager.machine_checksum
if read and CacheConditions.CHECKSUMS_MATCH not in self.cache_conditions:
checksum = "*"
else:
@@ -307,12 +321,11 @@ class ResultsCache(object):
autotest_args_checksum = hashlib.md5(
"".join(self.autotest_args)).hexdigest()
-
return (image_path_checksum,
self.autotest_name, str(self.iteration),
autotest_args_checksum,
checksum,
- remote,
+ machine_checksum,
str(self.CACHE_VERSION))
def ReadResult(self):
@@ -342,7 +355,7 @@ class ResultsCache(object):
def StoreResult(self, result):
cache_dir = self._GetCacheDirForWrite()
- result.StoreToCacheDir(cache_dir)
+ result.StoreToCacheDir(cache_dir, self.machine_manager)
class MockResultsCache(object):
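
The cache key now embeds the machine checksum in place of the remote host name, and CACHE_VERSION is bumped so older entries no longer match; when MACHINES_MATCH is not among the read conditions the machine component becomes a glob. A sketch of the key layout with illustrative parameter names, not the exact ResultsCache signature:

    import hashlib

    CACHE_VERSION = 5

    # Sketch of the cache-key tuple above; names are illustrative.
    def cache_key(image_path, autotest_name, iteration, autotest_args,
                  image_checksum, machine_checksum, machines_must_match, reading):
        machine_part = machine_checksum
        if reading and not machines_must_match:
            machine_part = "*"   # glob over results from any machine
        image_path_checksum = hashlib.md5(image_path.encode()).hexdigest()
        args_checksum = hashlib.md5("".join(autotest_args).encode()).hexdigest()
        return (image_path_checksum, autotest_name, str(iteration),
                args_checksum, image_checksum, machine_part, str(CACHE_VERSION))
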
diff --git a/crosperf/results_columns.py b/crosperf/results_columns.py
deleted file mode 100644
index 09e97d0b..00000000
--- a/crosperf/results_columns.py
+++ /dev/null
@@ -1,152 +0,0 @@
-#!/usr/bin/python
-
-# Copyright 2011 Google Inc. All Rights Reserved.
-
-import math
-
-
-class Column(object):
- def __init__(self, name):
- self.name = name
-
- def _ContainsString(self, results):
- for result in results:
- if isinstance(result, str):
- return True
- return False
-
- def _StripNone(self, results):
- res = []
- for result in results:
- if result is not None:
- res.append(result)
- return res
-
-
-class MinColumn(Column):
- def Compute(self, results, baseline_results):
- if self._ContainsString(results):
- return "-"
- results = self._StripNone(results)
- if not results:
- return "-"
- return min(results)
-
-
-class MaxColumn(Column):
- def Compute(self, results, baseline_results):
- if self._ContainsString(results):
- return "-"
- results = self._StripNone(results)
- if not results:
- return "-"
- return max(results)
-
-
-class MeanColumn(Column):
- def Compute(self, results, baseline_results):
- all_pass = True
- all_fail = True
- if self._ContainsString(results):
- for result in results:
- if result != "PASSED":
- all_pass = False
- if result != "FAILED":
- all_fail = False
-
- if all_pass:
- return "ALL PASS"
- elif all_fail:
- return "ALL FAIL"
- else:
- return "-"
-
- results = self._StripNone(results)
- if not results:
- return "-"
- return float(sum(results)) / len(results)
-
-
-class StandardDeviationColumn(Column):
- def __init__(self, name):
- super(StandardDeviationColumn, self).__init__(name)
-
- def Compute(self, results, baseline_results):
- if self._ContainsString(results):
- return "-"
-
- results = self._StripNone(results)
- if not results:
- return "-"
- n = len(results)
- average = sum(results) / n
- total = 0
- for result in results:
- total += (result - average) ** 2
-
- return math.sqrt(total / n)
-
-
-class RatioColumn(Column):
- def __init__(self, name):
- super(RatioColumn, self).__init__(name)
-
- def Compute(self, results, baseline_results):
- if self._ContainsString(results) or self._ContainsString(baseline_results):
- return "-"
-
- results = self._StripNone(results)
- baseline_results = self._StripNone(baseline_results)
- if not results or not baseline_results:
- return "-"
- result_mean = sum(results) / len(results)
- baseline_mean = sum(baseline_results) / len(baseline_results)
-
- if not baseline_mean:
- return "-"
-
- return result_mean / baseline_mean
-
-
-class DeltaColumn(Column):
- def __init__(self, name):
- super(DeltaColumn, self).__init__(name)
-
- def Compute(self, results, baseline_results):
- if self._ContainsString(results) or self._ContainsString(baseline_results):
- return "-"
-
- results = self._StripNone(results)
- baseline_results = self._StripNone(baseline_results)
- if not results or not baseline_results:
- return "-"
- result_mean = sum(results) / len(results)
- baseline_mean = sum(baseline_results) / len(baseline_results)
-
- if not baseline_mean:
- return "-"
-
- res = 100 * (result_mean - baseline_mean) / baseline_mean
- return res
-
-
-class IterationsCompleteColumn(Column):
- def __init__(self, name):
- super(IterationsCompleteColumn, self).__init__(name)
-
- def Compute(self, results, baseline_results):
- return len(self._StripNone(results))
-
-
-class IterationColumn(Column):
- def __init__(self, name, iteration):
- super(IterationColumn, self).__init__(name)
- self.iteration = iteration
-
- def Compute(self, results, baseline_results):
- if self.iteration > len(results):
- return ""
- res = results[self.iteration - 1]
- if not res:
- return "-"
- return res
diff --git a/crosperf/results_organizer.py b/crosperf/results_organizer.py
new file mode 100644
index 00000000..0071387b
--- /dev/null
+++ b/crosperf/results_organizer.py
@@ -0,0 +1,42 @@
+#!/usr/bin/python
+
+# Copyright 2012 Google Inc. All Rights Reserved.
+
+
+class ResultOrganizer(object):
+ """Create a dict from benchmark_runs.
+
+ The structure of the output dict is as follows:
+ {"benchmark_1":[
+    [{"key1":"v1", "key2":"v2"},{"key1":"v1", "key2":"v2"}]
+ #one label
+ []
+ #the other label
+ ]
+ "benchmark_2":
+ [
+ ]}.
+ """
+
+ def __init__(self, benchmark_runs, labels):
+ self.result = {}
+ self.labels = []
+ for label in labels:
+ self.labels.append(label.name)
+ for benchmark_run in benchmark_runs:
+ benchmark_name = benchmark_run.benchmark_name
+ if benchmark_name not in self.result:
+ self.result[benchmark_name] = []
+ while len(self.result[benchmark_name]) < len(labels):
+ self.result[benchmark_name].append([])
+ label_index = self.labels.index(benchmark_run.label_name)
+ cur_table = self.result[benchmark_name][label_index]
+ index = benchmark_run.iteration - 1
+ while index >= len(cur_table):
+ cur_table.append({})
+ cur_dict = cur_table[index]
+ if not benchmark_run.result:
+ continue
+ for autotest_key in benchmark_run.result.keyvals:
+ result_value = benchmark_run.result.keyvals[autotest_key]
+ cur_dict[autotest_key] = result_value
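
ResultOrganizer replaces the deleted results_columns machinery with a plain nested structure: result[benchmark_name][label_index][iteration_index] is a dict of autotest key/value pairs. A hypothetical usage sketch with stub objects standing in for the real BenchmarkRun and Label instances:

    from results_organizer import ResultOrganizer

    # Stub objects standing in for BenchmarkRun / Label, for illustration only.
    class _Result(object):
        def __init__(self, keyvals):
            self.keyvals = keyvals

    class _Run(object):
        def __init__(self, benchmark_name, label_name, iteration, keyvals):
            self.benchmark_name = benchmark_name
            self.label_name = label_name
            self.iteration = iteration
            self.result = _Result(keyvals)

    class _Label(object):
        def __init__(self, name):
            self.name = name

    runs = [_Run("bloat", "vanilla", 1, {"seconds": 12.1}),
            _Run("bloat", "patched", 1, {"seconds": 11.4})]
    organizer = ResultOrganizer(runs, [_Label("vanilla"), _Label("patched")])
    # organizer.result == {"bloat": [[{"seconds": 12.1}], [{"seconds": 11.4}]]}
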
diff --git a/crosperf/results_report.py b/crosperf/results_report.py
index 0cd46ed4..b591370a 100644
--- a/crosperf/results_report.py
+++ b/crosperf/results_report.py
@@ -2,20 +2,14 @@
# Copyright 2011 Google Inc. All Rights Reserved.
+import math
from column_chart import ColumnChart
-from results_columns import IterationColumn
-from results_columns import IterationsCompleteColumn
-from results_columns import MaxColumn
-from results_columns import MeanColumn
-from results_columns import MinColumn
-from results_columns import RatioColumn
-from results_columns import StandardDeviationColumn
from results_sorter import ResultSorter
-from table import Table
-
+from results_organizer import ResultOrganizer
+from utils.tabulator import *
class ResultsReport(object):
- DELTA_COLUMN_NAME = "Change"
+ MAX_COLOR_CODE = 255
def __init__(self, experiment):
self.experiment = experiment
@@ -32,85 +26,106 @@ class ResultsReport(object):
labels[benchmark_run.label_name].append(benchmark_run)
return labels
- def GetFullTable(self):
- full_columns = []
- max_iterations = 0
- for benchmark in self.benchmarks:
- if benchmark.iterations > max_iterations:
- max_iterations = benchmark.iterations
-
- for i in range(1, max_iterations + 1):
- full_columns.append(IterationColumn(str(i), i))
-
- full_columns.append(IterationsCompleteColumn("Completed"))
- full_columns.append(MinColumn("Min"))
- full_columns.append(MaxColumn("Max"))
- full_columns.append(MeanColumn("Avg"))
- full_columns.append(StandardDeviationColumn("Std Dev"))
- full_columns.append(RatioColumn(self.DELTA_COLUMN_NAME))
- return self._GetTable(self.labels, self.benchmarks, self.benchmark_runs,
- full_columns)
-
- def GetSummaryTable(self):
- summary_columns = [MeanColumn("Average"),
- RatioColumn(self.DELTA_COLUMN_NAME)]
- return self._GetTable(self.labels, self.benchmarks, self.benchmark_runs,
- summary_columns)
-
- def _GetTable(self, labels, benchmarks, benchmark_runs, columns):
- table = Table("box-table-a")
- label_headings = [Table.Cell("", hidden=True, colspan=2, header=True)]
- for label in labels:
- colspan = len(columns)
- if label.name == self.baseline.name:
- colspan -= 1
- label_headings.append(Table.Cell(label.name, colspan=colspan,
- header=True))
-
- table.AddRow(label_headings)
-
- column_headings = [Table.Cell("Autotest Key", header=True),
- Table.Cell("Iterations", header=True)]
- for label in labels:
- for column in columns:
- if (label.name == self.baseline.name and
- column.name == self.DELTA_COLUMN_NAME):
- continue
- column_headings.append(Table.Cell(column.name, header=True))
-
- table.AddRow(column_headings)
-
- sorter = ResultSorter(benchmark_runs)
-
- for benchmark in benchmarks:
- table.AddRow([Table.Cell(benchmark.name)])
- autotest_keys = sorter.GetAutotestKeys(benchmark.name)
- for autotest_key in autotest_keys:
- row = [Table.Cell(autotest_key),
- Table.Cell(benchmark.iterations)]
- for label in labels:
- for column in columns:
- if (label.name == self.baseline.name and
- column.name == self.DELTA_COLUMN_NAME):
- continue
- results = sorter.GetResults(benchmark.name,
- autotest_key, label.name)
- baseline_results = sorter.GetResults(benchmark.name,
- autotest_key,
- self.baseline.name)
- value = column.Compute(results, baseline_results)
- if isinstance(value, float):
- value_string = "%.2f" % value
- else:
- value_string = value
-
- row.append(Table.Cell(value_string))
-
- table.AddRow(row)
-
- return table
-
-
+ def GetFullTables(self):
+ columns = [Column(NonEmptyCountResult(),
+ Format(),
+ "Completed"),
+ Column(RawResult(),
+ Format()),
+ Column(MinResult(),
+ Format()),
+ Column(MaxResult(),
+ Format()),
+ Column(AmeanResult(),
+ Format()),
+ Column(StdResult(),
+ Format())
+ ]
+ return self._GetTables(self.labels, self.benchmark_runs, columns)
+
+ def GetSummaryTables(self):
+ columns = [Column(AmeanResult(),
+ Format()),
+ Column(StdResult(),
+ Format(), "StdDev"),
+ Column(CoeffVarResult(),
+ CoeffVarFormat(), "Mean/StdDev"),
+ Column(GmeanRatioResult(),
+ RatioFormat(), "GmeanSpeedup"),
+ Column(GmeanRatioResult(),
+ ColorBoxFormat(), " "),
+ Column(StatsSignificant(),
+ Format(), "p-value")
+ ]
+ return self._GetTables(self.labels, self.benchmark_runs, columns)
+
+ def _ParseColumn(self, columns, iteration):
+ new_column = []
+ for column in columns:
+ if column.result.__class__.__name__ != "RawResult":
+ #TODO(asharif): tabulator should support full table natively.
+ new_column.append(column)
+ else:
+ for i in range(iteration):
+ cc = Column(LiteralResult(i), Format(), str(i+1))
+ new_column.append(cc)
+ return new_column
+
+ def _AreAllRunsEmpty(self, runs):
+ for label in runs:
+ for dictionary in label:
+ if dictionary:
+ return False
+ return True
+
+ def _GetTables(self, labels, benchmark_runs, columns):
+ tables = []
+ ro = ResultOrganizer(benchmark_runs, labels)
+ result = ro.result
+ label_name = ro.labels
+ for item in result:
+ runs = result[item]
+ for benchmark in self.benchmarks:
+ if benchmark.name == item:
+ break
+ benchmark_info = ("Benchmark: {0}; Iterations: {1}"
+ .format(benchmark.name, benchmark.iterations))
+ cell = Cell()
+ cell.string_value = benchmark_info
+ ben_table = [[cell]]
+
+ if self._AreAllRunsEmpty(runs):
+ cell = Cell()
+ cell.string_value = ("This benchmark contains no result."
+ " Is the benchmark name valid?")
+ cell_table = [[cell]]
+ else:
+ tg = TableGenerator(runs, label_name)
+ table = tg.GetTable()
+ parsed_columns = self._ParseColumn(columns, benchmark.iterations)
+ tf = TableFormatter(table, parsed_columns)
+ cell_table = tf.GetCellTable()
+ tables.append(ben_table)
+ tables.append(cell_table)
+ return tables
+
+ def PrintTables(self, tables, out_to):
+ output = ""
+ for table in tables:
+ if out_to == "HTML":
+ tp = TablePrinter(table, TablePrinter.HTML)
+ elif out_to == "PLAIN":
+ tp = TablePrinter(table, TablePrinter.PLAIN)
+ elif out_to == "CONSOLE":
+ tp = TablePrinter(table, TablePrinter.CONSOLE)
+ elif out_to == "TSV":
+ tp = TablePrinter(table, TablePrinter.TSV)
+ elif out_to == "EMAIL":
+ tp = TablePrinter(table, TablePrinter.EMAIL)
+ else:
+ pass
+ output += tp.Print()
+ return output
class TextResultsReport(ResultsReport):
TEXT = """
===========================================
@@ -118,50 +133,36 @@ Results report for: '%s'
===========================================
-------------------------------------------
-Benchmark Run Status
--------------------------------------------
-%s
-
-Number re-images: %s
-
--------------------------------------------
Summary
-------------------------------------------
%s
-------------------------------------------
-Full Table
--------------------------------------------
-%s
-
--------------------------------------------
Experiment File
-------------------------------------------
%s
===========================================
"""
- def __init__(self, experiment):
+ def __init__(self, experiment, email=False):
super(TextResultsReport, self).__init__(experiment)
-
- def GetStatusTable(self):
- status_table = Table("status")
- for benchmark_run in self.benchmark_runs:
- status_table.AddRow([Table.Cell(benchmark_run.name),
- Table.Cell(benchmark_run.status),
- Table.Cell(benchmark_run.failure_reason)])
- return status_table
+ self.email = email
def GetReport(self):
+ summary_table = self.GetSummaryTables()
+ full_table = self.GetFullTables()
+ if not self.email:
+ return self.TEXT % (self.experiment.name,
+ self.PrintTables(summary_table, "CONSOLE"),
+ self.experiment.experiment_file)
+
return self.TEXT % (self.experiment.name,
- self.GetStatusTable().ToText(),
- self.experiment.machine_manager.num_reimages,
- self.GetSummaryTable().ToText(80),
- self.GetFullTable().ToText(80),
+ self.PrintTables(summary_table, "EMAIL"),
self.experiment.experiment_file)
class HTMLResultsReport(ResultsReport):
+
HTML = """
<html>
<head>
@@ -303,36 +304,49 @@ pre {
def GetReport(self):
chart_javascript = ""
- charts = self._GetCharts(self.labels, self.benchmarks, self.benchmark_runs)
+ charts = self._GetCharts(self.labels, self.benchmark_runs)
for chart in charts:
chart_javascript += chart.GetJavascript()
chart_divs = ""
for chart in charts:
chart_divs += chart.GetDiv()
- summary_table = self.GetSummaryTable()
- full_table = self.GetFullTable()
+ summary_table = self.GetSummaryTables()
+ full_table = self.GetFullTables()
return self.HTML % (chart_javascript,
- summary_table.ToHTML(),
- summary_table.ToText(),
- summary_table.ToTSV(),
+ self.PrintTables(summary_table, "HTML"),
+ self.PrintTables(summary_table, "PLAIN"),
+ self.PrintTables(summary_table, "TSV"),
self._GetTabMenuHTML("summary"),
chart_divs,
- full_table.ToHTML(),
- full_table.ToText(),
- full_table.ToTSV(),
+ self.PrintTables(full_table, "HTML"),
+ self.PrintTables(full_table, "PLAIN"),
+ self.PrintTables(full_table, "TSV"),
self._GetTabMenuHTML("full"),
self.experiment.experiment_file)
- def _GetCharts(self, labels, benchmarks, benchmark_runs):
+ def _GetCharts(self, labels, benchmark_runs):
charts = []
- sorter = ResultSorter(benchmark_runs)
-
- for benchmark in benchmarks:
- autotest_keys = sorter.GetAutotestKeys(benchmark.name)
-
- for autotest_key in autotest_keys:
- title = "%s: %s" % (benchmark.name, autotest_key.replace("/", " "))
+ ro = ResultOrganizer(benchmark_runs, labels)
+ result = ro.result
+ for item in result:
+ runs = result[item]
+ tg = TableGenerator(runs, ro.labels)
+ table = tg.GetTable()
+ columns = [Column(AmeanResult(),
+ Format()),
+ Column(MinResult(),
+ Format()),
+ Column(MaxResult(),
+ Format())
+ ]
+ tf = TableFormatter(table, columns)
+ data_table = tf.GetCellTable()
+
+ for i in range(2, len(data_table)):
+ cur_row_data = data_table[i]
+ autotest_key = cur_row_data[0].string_value
+ title = "{0}: {1}".format(item, autotest_key.replace("/", ""))
chart = ColumnChart(title, 300, 200)
chart.AddColumn("Label", "string")
chart.AddColumn("Average", "number")
@@ -340,17 +354,15 @@ pre {
chart.AddColumn("Max", "number")
chart.AddSeries("Min", "line", "black")
chart.AddSeries("Max", "line", "black")
-
- for label in labels:
- res = sorter.GetResults(benchmark.name, autotest_key, label.name)
- avg_val = MeanColumn("").Compute(res, None)
- min_val = MinColumn("").Compute(res, None)
- max_val = MaxColumn("").Compute(res, None)
- chart.AddRow([label.name, avg_val, min_val, max_val])
- if isinstance(avg_val, str):
+ cur_index = 1
+ for label in ro.labels:
+ chart.AddRow([label, cur_row_data[cur_index].value,
+ cur_row_data[cur_index + 1].value,
+ cur_row_data[cur_index + 2].value])
+ if isinstance(cur_row_data[cur_index].value, str):
chart = None
break
-
+ cur_index += 3
if chart:
charts.append(chart)
return charts
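
PrintTables maps an output-format name onto one of the tabulator's TablePrinter modes. A table-driven sketch of the same dispatch, assuming the TablePrinter constants imported from utils.tabulator above; it also turns an unknown format into an explicit error instead of falling through:

    # Sketch of a table-driven PrintTables, assuming utils.tabulator's TablePrinter
    # (the module pulled in via the star import in the diff above).
    from utils.tabulator import TablePrinter

    _FORMATS = {"HTML": TablePrinter.HTML,
                "PLAIN": TablePrinter.PLAIN,
                "CONSOLE": TablePrinter.CONSOLE,
                "TSV": TablePrinter.TSV,
                "EMAIL": TablePrinter.EMAIL}

    def print_tables(tables, out_to):
        if out_to not in _FORMATS:
            raise ValueError("Unknown output format: %s" % out_to)
        return "".join(TablePrinter(table, _FORMATS[out_to]).Print()
                       for table in tables)
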
diff --git a/crosperf/results_sorter.py b/crosperf/results_sorter.py
index 0567ef7b..985a91fb 100644
--- a/crosperf/results_sorter.py
+++ b/crosperf/results_sorter.py
@@ -9,6 +9,8 @@ class ResultSorter(object):
for benchmark_run in benchmark_runs:
benchmark_name = benchmark_run.benchmark_name
label_name = benchmark_run.label_name
+ if not benchmark_run.result:
+ continue
for autotest_key in benchmark_run.result.keyvals:
result_tuple = (benchmark_name, autotest_key, label_name)
if result_tuple not in self.table:
@@ -32,6 +34,8 @@ class ResultSorter(object):
benchmark_name = benchmark_run.benchmark_name
if benchmark_name not in self.autotest_keys:
self.autotest_keys[benchmark_name] = {}
+ if not benchmark_run.result:
+ continue
for autotest_key in benchmark_run.result.keyvals:
self.autotest_keys[benchmark_name][autotest_key] = True
diff --git a/crosperf/settings_factory.py b/crosperf/settings_factory.py
index 95ceb0fd..782f0dd3 100644
--- a/crosperf/settings_factory.py
+++ b/crosperf/settings_factory.py
@@ -26,15 +26,11 @@ class BenchmarkSettings(Settings):
self.AddField(FloatField("outlier_range", default=0.2,
description="The percentage of highest/lowest "
"values to omit when computing the average."))
- self.AddField(ListField("profile_counters",
- default=["cycles"],
- description="A list of profile counters to "
- "collect."))
- self.AddField(EnumField("profile_type",
- description="The type of profile to collect. "
- "Either 'stat', 'record' or ''.",
- options=["stat", "record", ""],
- default=""))
+ self.AddField(TextField("perf_args", default="",
+ description="The optional profile command. It "
+                            "enables perf commands to record performance-"
+                            "related counters. It must start with the perf "
+                            "command record or stat, followed by arguments."))
class LabelSettings(Settings):
@@ -66,10 +62,12 @@ class GlobalSettings(Settings):
self.AddField(BooleanField("rerun_if_failed", description="Whether to "
"re-run failed autotest runs or not.",
default=False))
+    self.AddField(ListField("email", description="Space-separated "
+ "list of email addresses to send email to."))
self.AddField(BooleanField("rerun", description="Whether to ignore the "
"cache and for autotests to be re-run.",
default=False))
- self.AddField(BooleanField("exact_remote", default=False,
+ self.AddField(BooleanField("exact_remote", default=True,
description="Ensure cached runs are run on the "
"same device that is specified as a remote."))
self.AddField(IntegerField("iterations", default=1,
@@ -80,14 +78,11 @@ class GlobalSettings(Settings):
"contains a src/scripts directory. Defaults to "
"the chromeos checkout which contains the "
"chromeos_image."))
- self.AddField(ListField("profile_counters",
- default=["cycles"],
- description="A list of profile counters to "
- "collect."))
- self.AddField(EnumField("profile_type",
- description="The type of profile to collect. "
- "Either 'stat', 'record' or ''.",
- options=["stat", "record", ""]))
+ self.AddField(TextField("perf_args", default="",
+ description="The optional profile command. It "
+                            "enables perf commands to record performance-"
+                            "related counters. It must start with the perf "
+                            "command record or stat, followed by arguments."))
class SettingsFactory(object):
diff --git a/crosperf/table.py b/crosperf/table.py
deleted file mode 100644
index 84eb21ae..00000000
--- a/crosperf/table.py
+++ /dev/null
@@ -1,84 +0,0 @@
-#!/usr/bin/python
-
-# Copyright 2011 Google Inc. All Rights Reserved.
-
-import math
-
-
-class Table(object):
- class Cell(object):
- def __init__(self, value, colspan=1, hidden=False, header=False):
- self.value = value
- self.colspan = colspan
- self.hidden = hidden
- self.header = header
-
- def __init__(self, table_id):
- self.table_id = table_id
- self.rows = []
-
- def AddRow(self, row):
- self.rows.append(row)
-
- def ToHTML(self):
- res = "<table id='%s'>\n" % self.table_id
- for row in self.rows:
- res += "<tr>"
- for cell in row:
- if cell.header:
- tag = "th"
- else:
- tag = "td"
- cell_class = ""
- if cell.hidden:
- cell_class = "class='hidden'"
- res += "<%s colspan='%s' %s>%s</%s>" % (tag, cell.colspan, cell_class,
- cell.value, tag)
- res += "</tr>\n"
- res += "</table>"
- return res
-
- def ToText(self, max_column_width=None):
- col_spacing = 2
- max_widths = []
- for row in self.rows:
- column = 0
- for cell in row:
- text_width = len(str(cell.value))
- per_column_width = int(math.ceil(float(text_width) / cell.colspan))
- if max_column_width:
- per_column_width = min(max_column_width, per_column_width)
- for i in range(column, column + cell.colspan):
- while i >= len(max_widths):
- max_widths.append(0)
- max_widths[i] = max(per_column_width, max_widths[i])
- column += cell.colspan
-
- res = ""
- for row in self.rows:
- column = 0
- for cell in row:
- val = str(cell.value)
- if max_column_width:
- if len(val) > max_column_width:
- val = val[:2] + ".." + val[len(val) - (max_column_width - 4):]
- res += val
- space_to_use = (sum(max_widths[column:column + cell.colspan]) +
- (cell.colspan * col_spacing))
- whitespace_length = space_to_use - len(val)
- res += " " * whitespace_length
- # Add space b/w columns
- column += cell.colspan
- res += "\n"
- return res
-
- def ToTSV(self):
- res = ""
- column = 0
- for row in self.rows:
- for cell in row:
- res += str(cell.value).replace("\t", " ")
- for _ in range(column, column + cell.colspan):
- res += "\t"
- res += "\n"
- return res