author     Christopher Di Bella <cjdb@google.com>   2021-03-18 20:31:06 +0000
committer  Caroline Tice <cmtice@chromium.org>      2021-04-14 04:45:03 +0000
commit     53e9fbebdfb841c09f2ff0cc614c6ee579a36ee2 (patch)
tree       c845e91025afcda87736fe063752a04ed871b28d /crosperf
parent     529f4563821c370765a0d3c1898fcf8e324c0dd7 (diff)
replaces skylab with crosfleet
BUG=chromium:1187326
TEST=Tested by hand on chrotomation.mtv

Change-Id: I7e0bd2a99e85f288fb3b1b9ff52c8e38d25df245
Reviewed-on: https://chromium-review.googlesource.com/c/chromiumos/third_party/toolchain-utils/+/2773632
Reviewed-by: Caroline Tice <cmtice@chromium.org>
Commit-Queue: Caroline Tice <cmtice@chromium.org>
Tested-by: Caroline Tice <cmtice@chromium.org>
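For users, the visible effect of this rename is the global experiment-file flag (see the settings_factory.py hunk below). A minimal sketch of an experiment file using the new setting, assuming the usual crosperf key/value syntax; the file contents here are illustrative, not taken from the patch:

  name: crosfleet_demo
  board: octopus
  crosfleet: True   # previously: skylab: True

  benchmark: octane {
    suite: telemetry_Crosperf
    iterations: 3
  }

Note that in crosfleet mode no remote is required; Experiment only raises 'No remote hosts specified' when neither remote nor crosfleet is set.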
Diffstat (limited to 'crosperf')
-rwxr-xr-x  crosperf/benchmark_run_unittest.py       74
-rw-r--r--  crosperf/experiment.py                   19
-rw-r--r--  crosperf/experiment_factory.py           69
-rwxr-xr-x  crosperf/experiment_factory_unittest.py  10
-rw-r--r--  crosperf/experiment_runner.py            20
-rw-r--r--  crosperf/label.py                          8
-rw-r--r--  crosperf/mock_instance.py                  4
-rw-r--r--  crosperf/schedv2.py                       10
-rw-r--r--  crosperf/settings_factory.py               4
-rwxr-xr-x  crosperf/settings_factory_unittest.py      2
-rw-r--r--  crosperf/suite_runner.py                  28
-rwxr-xr-x  crosperf/suite_runner_unittest.py         55
12 files changed, 160 insertions, 143 deletions
diff --git a/crosperf/benchmark_run_unittest.py b/crosperf/benchmark_run_unittest.py
index ab863004..9d815b80 100755
--- a/crosperf/benchmark_run_unittest.py
+++ b/crosperf/benchmark_run_unittest.py
@@ -61,7 +61,7 @@ class BenchmarkRunTest(unittest.TestCase):
cache_only=False,
log_level='average',
compiler='gcc',
- skylab=False)
+ crosfleet=False)
self.test_cache_conditions = [
CacheConditions.CACHE_FILE_EXISTS, CacheConditions.CHECKSUMS_MATCH
@@ -86,7 +86,7 @@ class BenchmarkRunTest(unittest.TestCase):
cache_only=False,
log_level='average',
compiler='gcc',
- skylab=False)
+ crosfleet=False)
logging_level = 'average'
m = MockMachineManager('/tmp/chromeos_root', 0, logging_level, '')
@@ -133,10 +133,11 @@ class BenchmarkRunTest(unittest.TestCase):
pass
def test_run(self):
- br = benchmark_run.BenchmarkRun(
- 'test_run', self.test_benchmark, self.test_label, 1,
- self.test_cache_conditions, self.mock_machine_manager, self.mock_logger,
- 'average', '', {})
+ br = benchmark_run.BenchmarkRun('test_run', self.test_benchmark,
+ self.test_label, 1,
+ self.test_cache_conditions,
+ self.mock_machine_manager, self.mock_logger,
+ 'average', '', {})
def MockLogOutput(msg, print_to_console=False):
"""Helper function for test_run."""
@@ -273,10 +274,11 @@ class BenchmarkRunTest(unittest.TestCase):
self.assertEqual(self.status, ['FAILED'])
def test_terminate_pass(self):
- br = benchmark_run.BenchmarkRun(
- 'test_run', self.test_benchmark, self.test_label, 1,
- self.test_cache_conditions, self.mock_machine_manager, self.mock_logger,
- 'average', '', {})
+ br = benchmark_run.BenchmarkRun('test_run', self.test_benchmark,
+ self.test_label, 1,
+ self.test_cache_conditions,
+ self.mock_machine_manager, self.mock_logger,
+ 'average', '', {})
def GetLastEventPassed():
"""Helper function for test_terminate_pass"""
@@ -300,10 +302,11 @@ class BenchmarkRunTest(unittest.TestCase):
self.assertEqual(self.status, benchmark_run.STATUS_FAILED)
def test_terminate_fail(self):
- br = benchmark_run.BenchmarkRun(
- 'test_run', self.test_benchmark, self.test_label, 1,
- self.test_cache_conditions, self.mock_machine_manager, self.mock_logger,
- 'average', '', {})
+ br = benchmark_run.BenchmarkRun('test_run', self.test_benchmark,
+ self.test_label, 1,
+ self.test_cache_conditions,
+ self.mock_machine_manager, self.mock_logger,
+ 'average', '', {})
def GetLastEventFailed():
"""Helper function for test_terminate_fail"""
@@ -327,10 +330,11 @@ class BenchmarkRunTest(unittest.TestCase):
self.assertEqual(self.status, benchmark_run.STATUS_SUCCEEDED)
def test_acquire_machine(self):
- br = benchmark_run.BenchmarkRun(
- 'test_run', self.test_benchmark, self.test_label, 1,
- self.test_cache_conditions, self.mock_machine_manager, self.mock_logger,
- 'average', '', {})
+ br = benchmark_run.BenchmarkRun('test_run', self.test_benchmark,
+ self.test_label, 1,
+ self.test_cache_conditions,
+ self.mock_machine_manager, self.mock_logger,
+ 'average', '', {})
br.terminated = True
self.assertRaises(Exception, br.AcquireMachine)
@@ -344,10 +348,11 @@ class BenchmarkRunTest(unittest.TestCase):
self.assertEqual(machine.name, 'chromeos1-row3-rack5-host7.cros')
def test_get_extra_autotest_args(self):
- br = benchmark_run.BenchmarkRun(
- 'test_run', self.test_benchmark, self.test_label, 1,
- self.test_cache_conditions, self.mock_machine_manager, self.mock_logger,
- 'average', '', {})
+ br = benchmark_run.BenchmarkRun('test_run', self.test_benchmark,
+ self.test_label, 1,
+ self.test_cache_conditions,
+ self.mock_machine_manager, self.mock_logger,
+ 'average', '', {})
def MockLogError(err_msg):
"""Helper function for test_get_extra_autotest_args"""
@@ -379,10 +384,11 @@ class BenchmarkRunTest(unittest.TestCase):
@mock.patch.object(SuiteRunner, 'Run')
@mock.patch.object(Result, 'CreateFromRun')
def test_run_test(self, mock_result, mock_runner):
- br = benchmark_run.BenchmarkRun(
- 'test_run', self.test_benchmark, self.test_label, 1,
- self.test_cache_conditions, self.mock_machine_manager, self.mock_logger,
- 'average', '', {})
+ br = benchmark_run.BenchmarkRun('test_run', self.test_benchmark,
+ self.test_label, 1,
+ self.test_cache_conditions,
+ self.mock_machine_manager, self.mock_logger,
+ 'average', '', {})
self.status = []
@@ -409,15 +415,17 @@ class BenchmarkRunTest(unittest.TestCase):
br.profiler_args)
self.assertEqual(mock_result.call_count, 1)
- mock_result.assert_called_with(
- self.mock_logger, 'average', self.test_label, None, "{'Score':100}", '',
- 0, 'page_cycler.netsim.top_10', 'telemetry_Crosperf', '')
+ mock_result.assert_called_with(self.mock_logger, 'average', self.test_label,
+ None, "{'Score':100}", '', 0,
+ 'page_cycler.netsim.top_10',
+ 'telemetry_Crosperf', '')
def test_set_cache_conditions(self):
- br = benchmark_run.BenchmarkRun(
- 'test_run', self.test_benchmark, self.test_label, 1,
- self.test_cache_conditions, self.mock_machine_manager, self.mock_logger,
- 'average', '', {})
+ br = benchmark_run.BenchmarkRun('test_run', self.test_benchmark,
+ self.test_label, 1,
+ self.test_cache_conditions,
+ self.mock_machine_manager, self.mock_logger,
+ 'average', '', {})
phony_cache_conditions = [123, 456, True, False]
diff --git a/crosperf/experiment.py b/crosperf/experiment.py
index 6e2efd45..854d7f77 100644
--- a/crosperf/experiment.py
+++ b/crosperf/experiment.py
@@ -29,7 +29,7 @@ class Experiment(object):
cache_conditions, labels, benchmarks, experiment_file, email_to,
acquire_timeout, log_dir, log_level, share_cache,
results_directory, compress_results, locks_directory, cwp_dso,
- ignore_min_max, skylab, dut_config):
+ ignore_min_max, crosfleet, dut_config):
self.name = name
self.working_directory = working_directory
self.remote = remote
@@ -56,14 +56,14 @@ class Experiment(object):
self.lock_mgr = None
self.cwp_dso = cwp_dso
self.ignore_min_max = ignore_min_max
- self.skylab = skylab
+ self.crosfleet = crosfleet
self.l = logger.GetLogger(log_dir)
if not self.benchmarks:
raise RuntimeError('No benchmarks specified')
if not self.labels:
raise RuntimeError('No labels specified')
- if not remote and not self.skylab:
+ if not remote and not self.crosfleet:
raise RuntimeError('No remote hosts specified')
# We need one chromeos_root to run the benchmarks in, but it doesn't
@@ -123,10 +123,11 @@ class Experiment(object):
logger_to_use = logger.Logger(self.log_dir, 'run.%s' % (full_name),
True)
benchmark_runs.append(
- benchmark_run.BenchmarkRun(
- benchmark_run_name, benchmark, label, iteration,
- self.cache_conditions, self.machine_manager, logger_to_use,
- self.log_level, self.share_cache, dut_config))
+ benchmark_run.BenchmarkRun(benchmark_run_name, benchmark, label,
+ iteration, self.cache_conditions,
+ self.machine_manager, logger_to_use,
+ self.log_level, self.share_cache,
+ dut_config))
return benchmark_runs
@@ -223,6 +224,6 @@ class Experiment(object):
m for m in self.locked_machines if m not in unlocked_machines
]
if failed_machines:
- raise RuntimeError(
- 'These machines are not unlocked correctly: %s' % failed_machines)
+ raise RuntimeError('These machines are not unlocked correctly: %s' %
+ failed_machines)
self.lock_mgr = None
diff --git a/crosperf/experiment_factory.py b/crosperf/experiment_factory.py
index 332f0357..73928756 100644
--- a/crosperf/experiment_factory.py
+++ b/crosperf/experiment_factory.py
@@ -104,9 +104,10 @@ class ExperimentFactory(object):
show_all_results, retries, run_local, cwp_dso, weight):
"""Add all the tests in a set to the benchmarks list."""
for test_name in benchmark_list:
- telemetry_benchmark = Benchmark(
- test_name, test_name, test_args, iterations, rm_chroot_tmp, perf_args,
- suite, show_all_results, retries, run_local, cwp_dso, weight)
+ telemetry_benchmark = Benchmark(test_name, test_name, test_args,
+ iterations, rm_chroot_tmp, perf_args,
+ suite, show_all_results, retries,
+ run_local, cwp_dso, weight)
benchmarks.append(telemetry_benchmark)
def GetExperiment(self, experiment_file, working_directory, log_dir):
@@ -119,9 +120,9 @@ class ExperimentFactory(object):
if log_level not in ('quiet', 'average', 'verbose'):
log_level = 'verbose'
- skylab = global_settings.GetField('skylab')
- # Check whether skylab tool is installed correctly for skylab mode.
- if skylab and not self.CheckSkylabTool(chromeos_root, log_level):
+ crosfleet = global_settings.GetField('crosfleet')
+ # Check whether crosfleet tool is installed correctly for crosfleet mode.
+ if crosfleet and not self.CheckCrosfleetTool(chromeos_root, log_level):
sys.exit(0)
remote = global_settings.GetField('remote')
@@ -266,10 +267,11 @@ class ExperimentFactory(object):
perf_args, suite, show_all_results, retries,
run_local, cwp_dso, weight)
elif test_name == 'all_crosbolt_perf':
- self.AppendBenchmarkSet(
- benchmarks, telemetry_crosbolt_perf_tests, test_args, iterations,
- rm_chroot_tmp, perf_args, 'telemetry_Crosperf', show_all_results,
- retries, run_local, cwp_dso, weight)
+ self.AppendBenchmarkSet(benchmarks, telemetry_crosbolt_perf_tests,
+ test_args, iterations, rm_chroot_tmp,
+ perf_args, 'telemetry_Crosperf',
+ show_all_results, retries, run_local, cwp_dso,
+ weight)
self.AppendBenchmarkSet(
benchmarks,
crosbolt_perf_tests,
@@ -321,10 +323,11 @@ class ExperimentFactory(object):
# cwp_dso=cwp_dso,
# weight=weight))
elif test_name == 'all_toolchain_perf_old':
- self.AppendBenchmarkSet(
- benchmarks, telemetry_toolchain_old_perf_tests, test_args,
- iterations, rm_chroot_tmp, perf_args, suite, show_all_results,
- retries, run_local, cwp_dso, weight)
+ self.AppendBenchmarkSet(benchmarks,
+ telemetry_toolchain_old_perf_tests, test_args,
+ iterations, rm_chroot_tmp, perf_args, suite,
+ show_all_results, retries, run_local, cwp_dso,
+ weight)
else:
benchmark = Benchmark(benchmark_name, test_name, test_args,
iterations, rm_chroot_tmp, perf_args, suite,
@@ -389,8 +392,9 @@ class ExperimentFactory(object):
my_remote = new_remote
if image:
- if skylab:
- raise RuntimeError('In skylab mode, local image should not be used.')
+ if crosfleet:
+ raise RuntimeError(
+ 'In crosfleet mode, local image should not be used.')
if build:
raise RuntimeError('Image path and build are provided at the same '
'time, please use only one of them.')
@@ -407,7 +411,7 @@ class ExperimentFactory(object):
# TODO(yunlian): We should consolidate code in machine_manager.py
# to derermine whether we are running from within google or not
if ('corp.google.com' in socket.gethostname() and not my_remote and
- not skylab):
+ not crosfleet):
my_remote = self.GetDefaultRemotes(board)
if global_settings.GetField('same_machine') and len(my_remote) > 1:
raise RuntimeError('Only one remote is allowed when same_machine '
@@ -418,12 +422,12 @@ class ExperimentFactory(object):
# pylint: disable=too-many-function-args
label = MockLabel(label_name, build, image, autotest_path, debug_path,
chromeos_root, board, my_remote, image_args,
- cache_dir, cache_only, log_level, compiler, skylab,
+ cache_dir, cache_only, log_level, compiler, crosfleet,
chrome_src)
else:
label = Label(label_name, build, image, autotest_path, debug_path,
chromeos_root, board, my_remote, image_args, cache_dir,
- cache_only, log_level, compiler, skylab, chrome_src)
+ cache_only, log_level, compiler, crosfleet, chrome_src)
labels.append(label)
if not labels:
@@ -432,15 +436,15 @@ class ExperimentFactory(object):
email = global_settings.GetField('email')
all_remote += list(set(my_remote))
all_remote = list(set(all_remote))
- if skylab:
+ if crosfleet:
for remote in all_remote:
- self.CheckRemotesInSkylab(remote)
+ self.CheckRemotesInCrosfleet(remote)
experiment = Experiment(experiment_name, all_remote, working_directory,
chromeos_root, cache_conditions, labels, benchmarks,
experiment_file.Canonicalize(), email,
acquire_timeout, log_dir, log_level, share_cache,
results_dir, compress_results, locks_dir, cwp_dso,
- ignore_min_max, skylab, dut_config)
+ ignore_min_max, crosfleet, dut_config)
return experiment
@@ -464,26 +468,27 @@ class ExperimentFactory(object):
else:
raise RuntimeError('There is no remote for {0}'.format(board))
- def CheckRemotesInSkylab(self, remote):
+ def CheckRemotesInCrosfleet(self, remote):
# TODO: (AI:zhizhouy) need to check whether a remote is a local or lab
# machine. If not lab machine, raise an error.
pass
- def CheckSkylabTool(self, chromeos_root, log_level):
- SKYLAB_PATH = '/usr/local/bin/skylab'
- if os.path.exists(SKYLAB_PATH):
+ def CheckCrosfleetTool(self, chromeos_root, log_level):
+ CROSFLEET_PATH = 'crosfleet'
+ if os.path.exists(CROSFLEET_PATH):
return True
l = logger.GetLogger()
- l.LogOutput('Skylab tool not installed, trying to install it.')
+ l.LogOutput('Crosfleet tool not installed, trying to install it.')
ce = command_executer.GetCommandExecuter(l, log_level=log_level)
setup_lab_tools = os.path.join(chromeos_root, 'chromeos-admin', 'lab-tools',
'setup_lab_tools')
cmd = '%s' % setup_lab_tools
status = ce.RunCommand(cmd)
if status != 0:
- raise RuntimeError('Skylab tool not installed correctly, please try to '
- 'manually install it from %s' % setup_lab_tools)
- l.LogOutput('Skylab is installed at %s, please login before first use. '
- 'Login by running "skylab login" and follow instructions.' %
- SKYLAB_PATH)
+ raise RuntimeError(
+ 'Crosfleet tool not installed correctly, please try to '
+ 'manually install it from %s' % setup_lab_tools)
+ l.LogOutput('Crosfleet is installed at %s, please login before first use. '
+ 'Login by running "crosfleet login" and follow instructions.' %
+ CROSFLEET_PATH)
return False
diff --git a/crosperf/experiment_factory_unittest.py b/crosperf/experiment_factory_unittest.py
index 3528eb1f..53d28f74 100755
--- a/crosperf/experiment_factory_unittest.py
+++ b/crosperf/experiment_factory_unittest.py
@@ -417,28 +417,28 @@ class ExperimentFactoryTest(unittest.TestCase):
@mock.patch.object(command_executer.CommandExecuter, 'RunCommand')
@mock.patch.object(os.path, 'exists')
- def test_check_skylab_tool(self, mock_exists, mock_runcmd):
+ def test_check_crosfleet_tool(self, mock_exists, mock_runcmd):
ef = ExperimentFactory()
chromeos_root = '/tmp/chromeos'
log_level = 'average'
mock_exists.return_value = True
- ret = ef.CheckSkylabTool(chromeos_root, log_level)
+ ret = ef.CheckCrosfleetTool(chromeos_root, log_level)
self.assertTrue(ret)
mock_exists.return_value = False
mock_runcmd.return_value = 1
with self.assertRaises(RuntimeError) as err:
- ef.CheckSkylabTool(chromeos_root, log_level)
+ ef.CheckCrosfleetTool(chromeos_root, log_level)
self.assertEqual(mock_runcmd.call_count, 1)
self.assertEqual(
- str(err.exception), 'Skylab tool not installed '
+ str(err.exception), 'Crosfleet tool not installed '
'correctly, please try to manually install it from '
'/tmp/chromeos/chromeos-admin/lab-tools/setup_lab_tools')
mock_runcmd.return_value = 0
mock_runcmd.call_count = 0
- ret = ef.CheckSkylabTool(chromeos_root, log_level)
+ ret = ef.CheckCrosfleetTool(chromeos_root, log_level)
self.assertEqual(mock_runcmd.call_count, 1)
self.assertFalse(ret)
diff --git a/crosperf/experiment_runner.py b/crosperf/experiment_runner.py
index 8ba85a4c..49aff425 100644
--- a/crosperf/experiment_runner.py
+++ b/crosperf/experiment_runner.py
@@ -107,15 +107,15 @@ class ExperimentRunner(object):
"""Get where is the machine from.
Returns:
- The location of the machine: local or skylab
+ The location of the machine: local or crosfleet
"""
# We assume that lab machine always starts with chromeos*, and local
# machines are ip address.
if 'chromeos' in machine:
- if lock_mgr.CheckMachineInSkylab(machine):
- return 'skylab'
+ if lock_mgr.CheckMachineInCrosfleet(machine):
+ return 'crosfleet'
else:
- raise RuntimeError('Lab machine not in Skylab.')
+ raise RuntimeError('Lab machine not in Crosfleet.')
return 'local'
def _LockAllMachines(self, experiment):
@@ -125,7 +125,7 @@ class ExperimentRunner(object):
in three different modes automatically, to prevent any other crosperf runs
from being able to update/use the machines while this experiment is
running:
- - Skylab machines: Use skylab lease-dut mechanism to lease
+ - Crosfleet machines: Use crosfleet lease-dut mechanism to lease
- Local machines: Use file lock mechanism to lock
"""
if test_flag.GetTestMode():
@@ -143,8 +143,8 @@ class ExperimentRunner(object):
machine_type = self._GetMachineType(experiment.lock_mgr, m)
if machine_type == 'local':
experiment.lock_mgr.AddMachineToLocal(m)
- elif machine_type == 'skylab':
- experiment.lock_mgr.AddMachineToSkylab(m)
+ elif machine_type == 'crosfleet':
+ experiment.lock_mgr.AddMachineToCrosfleet(m)
machine_states = experiment.lock_mgr.GetMachineStates('lock')
experiment.lock_mgr.CheckMachineLocks(machine_states, 'lock')
self.locked_machines = experiment.lock_mgr.UpdateMachines(True)
@@ -171,12 +171,12 @@ class ExperimentRunner(object):
def _Run(self, experiment):
try:
- # We should not lease machines if tests are launched via `skylab
- # create-test`. This is because leasing DUT in skylab will create a
+ # We should not lease machines if tests are launched via `crosfleet
+ # create-test`. This is because leasing DUT in crosfleet will create a
# no-op task on the DUT and new test created will be hanging there.
# TODO(zhizhouy): Need to check whether machine is ready or not before
# assigning a test to it.
- if not experiment.skylab:
+ if not experiment.crosfleet:
self._LockAllMachines(experiment)
# Calculate all checksums of avaiable/locked machines, to ensure same
# label has same machines for testing
diff --git a/crosperf/label.py b/crosperf/label.py
index a55d663c..30bf5f8c 100644
--- a/crosperf/label.py
+++ b/crosperf/label.py
@@ -32,7 +32,7 @@ class Label(object):
cache_only,
log_level,
compiler,
- skylab=False,
+ crosfleet=False,
chrome_src=None):
self.image_type = self._GetImageType(chromeos_image)
@@ -55,7 +55,7 @@ class Label(object):
self.log_level = log_level
self.chrome_version = ''
self.compiler = compiler
- self.skylab = skylab
+ self.crosfleet = crosfleet
if not chromeos_root:
if self.image_type == 'local':
@@ -153,7 +153,7 @@ class MockLabel(object):
cache_only,
log_level,
compiler,
- skylab=False,
+ crosfleet=False,
chrome_src=None):
self.name = name
self.build = build
@@ -174,7 +174,7 @@ class MockLabel(object):
self.checksum = ''
self.log_level = log_level
self.compiler = compiler
- self.skylab = skylab
+ self.crosfleet = crosfleet
self.chrome_version = 'Fake Chrome Version 50'
def _GetImageType(self, chromeos_image):
diff --git a/crosperf/mock_instance.py b/crosperf/mock_instance.py
index 842d6343..f44ed87c 100644
--- a/crosperf/mock_instance.py
+++ b/crosperf/mock_instance.py
@@ -25,7 +25,7 @@ label1 = MockLabel(
cache_only=False,
log_level='average',
compiler='gcc',
- skylab=False,
+ crosfleet=False,
chrome_src=None)
label2 = MockLabel(
@@ -42,7 +42,7 @@ label2 = MockLabel(
cache_only=False,
log_level='average',
compiler='gcc',
- skylab=False,
+ crosfleet=False,
chrome_src=None)
benchmark1 = Benchmark('benchmark1', 'autotest_name_1', 'autotest_args', 2, '',
diff --git a/crosperf/schedv2.py b/crosperf/schedv2.py
index 68e1e5b8..49c6344d 100644
--- a/crosperf/schedv2.py
+++ b/crosperf/schedv2.py
@@ -108,8 +108,8 @@ class DutWorker(Thread):
if self._terminated:
return 1
- if self._sched.get_experiment().skylab:
- self._logger.LogOutput('Skylab mode, do not image before testing.')
+ if self._sched.get_experiment().crosfleet:
+ self._logger.LogOutput('Crosfleet mode, do not image before testing.')
self._dut.label = label
return 0
@@ -295,9 +295,9 @@ class Schedv2(object):
# Split benchmarkruns set into segments. Each segment will be handled by
# a thread. Note, we use (x+3)/4 to mimic math.ceil(x/4).
n_threads = max(2, min(20, (n_benchmarkruns + 3) // 4))
- self._logger.LogOutput(('Starting {} threads to read cache status for '
- '{} benchmark runs ...').format(
- n_threads, n_benchmarkruns))
+ self._logger.LogOutput(
+ ('Starting {} threads to read cache status for '
+ '{} benchmark runs ...').format(n_threads, n_benchmarkruns))
benchmarkruns_per_thread = (n_benchmarkruns + n_threads - 1) // n_threads
benchmarkrun_segments = []
for i in range(n_threads - 1):
diff --git a/crosperf/settings_factory.py b/crosperf/settings_factory.py
index 7033a3e8..34326b68 100644
--- a/crosperf/settings_factory.py
+++ b/crosperf/settings_factory.py
@@ -156,8 +156,8 @@ class GlobalSettings(Settings):
'experiments on, e.g. x86-alex.'))
self.AddField(
BooleanField(
- 'skylab',
- description='Whether to run experiments via skylab.',
+ 'crosfleet',
+ description='Whether to run experiments via crosfleet.',
default=False))
self.AddField(
ListField(
diff --git a/crosperf/settings_factory_unittest.py b/crosperf/settings_factory_unittest.py
index bc107110..035da8d7 100755
--- a/crosperf/settings_factory_unittest.py
+++ b/crosperf/settings_factory_unittest.py
@@ -53,7 +53,7 @@ class GlobalSettingsTest(unittest.TestCase):
self.assertEqual(len(res.fields), 39)
self.assertEqual(res.GetField('name'), '')
self.assertEqual(res.GetField('board'), '')
- self.assertEqual(res.GetField('skylab'), False)
+ self.assertEqual(res.GetField('crosfleet'), False)
self.assertEqual(res.GetField('remote'), None)
self.assertEqual(res.GetField('rerun_if_failed'), False)
self.assertEqual(res.GetField('rm_chroot_tmp'), False)
diff --git a/crosperf/suite_runner.py b/crosperf/suite_runner.py
index 17e1ad73..6bd4ff39 100644
--- a/crosperf/suite_runner.py
+++ b/crosperf/suite_runner.py
@@ -18,7 +18,7 @@ from cros_utils import command_executer
TEST_THAT_PATH = '/usr/bin/test_that'
TAST_PATH = '/usr/bin/tast'
-SKYLAB_PATH = '/usr/local/bin/skylab'
+CROSFLEET_PATH = 'crosfleet'
GS_UTIL = 'src/chromium/depot_tools/gsutil.py'
AUTOTEST_DIR = '/mnt/host/source/src/third_party/autotest/files'
CHROME_MOUNT_DIR = '/tmp/chrome_root'
@@ -75,8 +75,8 @@ class SuiteRunner(object):
def Run(self, cros_machine, label, benchmark, test_args, profiler_args):
machine_name = cros_machine.name
for i in range(0, benchmark.retries + 1):
- if label.skylab:
- ret_tup = self.Skylab_Run(label, benchmark, test_args, profiler_args)
+ if label.crosfleet:
+ ret_tup = self.Crosfleet_Run(label, benchmark, test_args, profiler_args)
else:
if benchmark.suite == 'tast':
ret_tup = self.Tast_Run(machine_name, label, benchmark)
@@ -87,12 +87,12 @@ class SuiteRunner(object):
self.logger.LogOutput('benchmark %s failed. Retries left: %s' %
(benchmark.name, benchmark.retries - i))
elif i > 0:
- self.logger.LogOutput(
- 'benchmark %s succeded after %s retries' % (benchmark.name, i))
+ self.logger.LogOutput('benchmark %s succeded after %s retries' %
+ (benchmark.name, i))
break
else:
- self.logger.LogOutput(
- 'benchmark %s succeded on first try' % benchmark.name)
+ self.logger.LogOutput('benchmark %s succeded on first try' %
+ benchmark.name)
break
return ret_tup
@@ -238,8 +238,8 @@ class SuiteRunner(object):
self.logger.LogOutput('Result downloaded for task %s' % task_id)
return status
- def Skylab_Run(self, label, benchmark, test_args, profiler_args):
- """Run the test via skylab.."""
+ def Crosfleet_Run(self, label, benchmark, test_args, profiler_args):
+ """Run the test via crosfleet.."""
options = []
if label.board:
options.append('-board=%s' % label.board)
@@ -257,19 +257,19 @@ class SuiteRunner(object):
dimensions.append('-dim dut_name:%s' % dut.rstrip('.cros'))
command = (('%s create-test %s %s %s') % \
- (SKYLAB_PATH, ' '.join(dimensions), ' '.join(options),
+ (CROSFLEET_PATH, ' '.join(dimensions), ' '.join(options),
benchmark.suite if
(benchmark.suite == 'telemetry_Crosperf' or
benchmark.suite == 'crosperf_Wrapper')
else benchmark.test_name))
if self.log_level != 'verbose':
- self.logger.LogOutput('Starting skylab test.')
+ self.logger.LogOutput('Starting crosfleet test.')
self.logger.LogOutput('CMD: %s' % command)
ret_tup = self._ce.RunCommandWOutput(command, command_terminator=self._ct)
if ret_tup[0] != 0:
- self.logger.LogOutput('Skylab test not created successfully.')
+ self.logger.LogOutput('Crosfleet test not created successfully.')
return ret_tup
# Std output of the command will look like:
@@ -278,9 +278,9 @@ class SuiteRunner(object):
# number in the very end of the link address.
task_id = ret_tup[1].strip().split('b')[-1]
- command = ('skylab wait-task %s' % task_id)
+ command = ('crosfleet wait-task %s' % task_id)
if self.log_level != 'verbose':
- self.logger.LogOutput('Waiting for skylab test to finish.')
+ self.logger.LogOutput('Waiting for crosfleet test to finish.')
self.logger.LogOutput('CMD: %s' % command)
ret_tup = self._ce.RunCommandWOutput(command, command_terminator=self._ct)
diff --git a/crosperf/suite_runner_unittest.py b/crosperf/suite_runner_unittest.py
index 86e1ef19..c1eacb32 100755
--- a/crosperf/suite_runner_unittest.py
+++ b/crosperf/suite_runner_unittest.py
@@ -64,16 +64,17 @@ class SuiteRunnerTest(unittest.TestCase):
def __init__(self, *args, **kwargs):
super(SuiteRunnerTest, self).__init__(*args, **kwargs)
- self.skylab_run_args = []
+ self.crosfleet_run_args = []
self.test_that_args = []
self.tast_args = []
- self.call_skylab_run = False
+ self.call_crosfleet_run = False
self.call_test_that_run = False
self.call_tast_run = False
def setUp(self):
- self.runner = suite_runner.SuiteRunner(
- {}, self.mock_logger, 'verbose', self.mock_cmd_exec, self.mock_cmd_term)
+ self.runner = suite_runner.SuiteRunner({}, self.mock_logger, 'verbose',
+ self.mock_cmd_exec,
+ self.mock_cmd_term)
def test_get_profiler_args(self):
input_str = ("--profiler=custom_perf --profiler_args='perf_options"
@@ -98,16 +99,18 @@ class SuiteRunnerTest(unittest.TestCase):
def reset():
self.test_that_args = []
- self.skylab_run_args = []
+ self.crosfleet_run_args = []
self.tast_args = []
self.call_test_that_run = False
- self.call_skylab_run = False
+ self.call_crosfleet_run = False
self.call_tast_run = False
- def FakeSkylabRun(test_label, benchmark, test_args, profiler_args):
- self.skylab_run_args = [test_label, benchmark, test_args, profiler_args]
- self.call_skylab_run = True
- return 'Ran FakeSkylabRun'
+ def FakeCrosfleetRun(test_label, benchmark, test_args, profiler_args):
+ self.crosfleet_run_args = [
+ test_label, benchmark, test_args, profiler_args
+ ]
+ self.call_crosfleet_run = True
+ return 'Ran FakeCrosfleetRun'
def FakeTestThatRun(machine, test_label, benchmark, test_args,
profiler_args):
@@ -122,7 +125,7 @@ class SuiteRunnerTest(unittest.TestCase):
self.call_tast_run = True
return 'Ran FakeTastRun'
- self.runner.Skylab_Run = FakeSkylabRun
+ self.runner.Crosfleet_Run = FakeCrosfleetRun
self.runner.Test_That_Run = FakeTestThatRun
self.runner.Tast_Run = FakeTastRun
@@ -137,31 +140,31 @@ class SuiteRunnerTest(unittest.TestCase):
test_args = ''
profiler_args = ''
- # Test skylab run for telemetry_Crosperf and crosperf_Wrapper benchmarks.
- self.mock_label.skylab = True
+ # Test crosfleet run for telemetry_Crosperf and crosperf_Wrapper benchmarks.
+ self.mock_label.crosfleet = True
reset()
self.runner.Run(cros_machine, self.mock_label, self.crosperf_wrapper_bench,
test_args, profiler_args)
- self.assertTrue(self.call_skylab_run)
+ self.assertTrue(self.call_crosfleet_run)
self.assertFalse(self.call_test_that_run)
- self.assertEqual(self.skylab_run_args,
+ self.assertEqual(self.crosfleet_run_args,
[self.mock_label, self.crosperf_wrapper_bench, '', ''])
reset()
self.runner.Run(cros_machine, self.mock_label,
self.telemetry_crosperf_bench, test_args, profiler_args)
- self.assertTrue(self.call_skylab_run)
+ self.assertTrue(self.call_crosfleet_run)
self.assertFalse(self.call_test_that_run)
- self.assertEqual(self.skylab_run_args,
+ self.assertEqual(self.crosfleet_run_args,
[self.mock_label, self.telemetry_crosperf_bench, '', ''])
# Test test_that run for telemetry_Crosperf and crosperf_Wrapper benchmarks.
- self.mock_label.skylab = False
+ self.mock_label.crosfleet = False
reset()
self.runner.Run(cros_machine, self.mock_label, self.crosperf_wrapper_bench,
test_args, profiler_args)
self.assertTrue(self.call_test_that_run)
- self.assertFalse(self.call_skylab_run)
+ self.assertFalse(self.call_crosfleet_run)
self.assertEqual(
self.test_that_args,
['fake_machine', self.mock_label, self.crosperf_wrapper_bench, '', ''])
@@ -170,7 +173,7 @@ class SuiteRunnerTest(unittest.TestCase):
self.runner.Run(cros_machine, self.mock_label,
self.telemetry_crosperf_bench, test_args, profiler_args)
self.assertTrue(self.call_test_that_run)
- self.assertFalse(self.call_skylab_run)
+ self.assertFalse(self.call_crosfleet_run)
self.assertEqual(self.test_that_args, [
'fake_machine', self.mock_label, self.telemetry_crosperf_bench, '', ''
])
@@ -180,7 +183,7 @@ class SuiteRunnerTest(unittest.TestCase):
self.runner.Run(cros_machine, self.mock_label, self.tast_bench, '', '')
self.assertTrue(self.call_tast_run)
self.assertFalse(self.call_test_that_run)
- self.assertFalse(self.call_skylab_run)
+ self.assertFalse(self.call_crosfleet_run)
self.assertEqual(self.tast_args,
['fake_machine', self.mock_label, self.tast_bench])
@@ -257,7 +260,7 @@ class SuiteRunnerTest(unittest.TestCase):
@mock.patch.object(command_executer.CommandExecuter, 'RunCommandWOutput')
@mock.patch.object(json, 'loads')
- def test_skylab_run_client(self, mock_json_loads, mock_runcmd):
+ def test_crosfleet_run_client(self, mock_json_loads, mock_runcmd):
def FakeDownloadResult(l, task_id):
if l and task_id:
@@ -279,10 +282,10 @@ class SuiteRunnerTest(unittest.TestCase):
}
self.mock_json.loads = mock_json_loads
- self.mock_label.skylab = True
+ self.mock_label.crosfleet = True
self.runner.DownloadResult = FakeDownloadResult
- res = self.runner.Skylab_Run(self.mock_label, self.crosperf_wrapper_bench,
- '', '')
+ res = self.runner.Crosfleet_Run(self.mock_label,
+ self.crosperf_wrapper_bench, '', '')
ret_tup = (0, '\nResults placed in tmp/swarming-12345\n', '')
self.assertEqual(res, ret_tup)
self.assertEqual(mock_runcmd.call_count, 2)
@@ -293,7 +296,7 @@ class SuiteRunnerTest(unittest.TestCase):
self.assertEqual(args_dict['command_terminator'], self.mock_cmd_term)
args_list = mock_runcmd.call_args_list[1][0]
- self.assertEqual(args_list[0], ('skylab wait-task 12345'))
+ self.assertEqual(args_list[0], ('crosfleet wait-task 12345'))
self.assertEqual(args_dict['command_terminator'], self.mock_cmd_term)
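
As the suite_runner.py hunks above show, crosperf shells out to the tool twice: once to create the test, then again to wait on the task id parsed from the very end of the swarming link printed by create-test. A minimal Python sketch of that command-construction pattern, assuming the shapes visible in the diff (the helper names here are hypothetical; the real logic lives inline in Crosfleet_Run):

CROSFLEET_PATH = 'crosfleet'  # resolved via $PATH, unlike the old fixed /usr/local/bin/skylab path

def build_create_test_command(dimensions, options, test):
    # dimensions like ['-dim dut_name:chromeos1-row3-rack5-host7'],
    # options like ['-board=octopus'].
    return '%s create-test %s %s %s' % (
        CROSFLEET_PATH, ' '.join(dimensions), ' '.join(options), test)

def build_wait_task_command(task_id):
    # task_id is the trailing number of the link printed by create-test,
    # e.g. '12345' -> 'crosfleet wait-task 12345'.
    return '%s wait-task %s' % (CROSFLEET_PATH, task_id)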