aboutsummaryrefslogtreecommitdiff
path: root/cros_utils
diff options
context:
space:
mode:
Diffstat (limited to 'cros_utils')
-rwxr-xr-xcros_utils/buildbot_json.py6
-rw-r--r--cros_utils/buildbot_utils.py19
-rwxr-xr-xcros_utils/buildbot_utils_unittest.py34
-rw-r--r--cros_utils/command_executer.py49
-rwxr-xr-xcros_utils/command_executer_unittest.py2
-rw-r--r--cros_utils/contextlib3.py116
-rwxr-xr-xcros_utils/contextlib3_test.py195
-rwxr-xr-xcros_utils/email_sender.py16
-rw-r--r--cros_utils/locks.py25
-rw-r--r--cros_utils/misc.py13
-rw-r--r--[-rwxr-xr-x]cros_utils/no_pseudo_terminal_test.py20
-rw-r--r--cros_utils/tabulator.py435
-rw-r--r--cros_utils/tabulator_test.py73
13 files changed, 143 insertions, 860 deletions
diff --git a/cros_utils/buildbot_json.py b/cros_utils/buildbot_json.py
index 42a27744..8a9d9cb8 100755
--- a/cros_utils/buildbot_json.py
+++ b/cros_utils/buildbot_json.py
@@ -316,7 +316,7 @@ class NonAddressableNodeList(VirtualNodeList): # pylint: disable=W0223
@property
def cached_children(self):
if self.parent.cached_data is not None:
- for i in range(len(self.parent.cached_data[self.subkey])):
+ for i in xrange(len(self.parent.cached_data[self.subkey])):
yield self[i]
@property
@@ -352,7 +352,7 @@ class NonAddressableNodeList(VirtualNodeList): # pylint: disable=W0223
def __iter__(self):
"""Enables 'for i in obj:'. It returns children."""
if self.data:
- for i in range(len(self.data)):
+ for i in xrange(len(self.data)):
yield self[i]
def __getitem__(self, key):
@@ -868,7 +868,7 @@ class Builds(AddressableNodeList):
# Only cache keys here.
self.cache_keys()
if self._keys:
- for i in range(max(self._keys), -1, -1):
+ for i in xrange(max(self._keys), -1, -1):
yield self[i]
def cache_keys(self):
diff --git a/cros_utils/buildbot_utils.py b/cros_utils/buildbot_utils.py
index 35dc3ac6..911ea03e 100644
--- a/cros_utils/buildbot_utils.py
+++ b/cros_utils/buildbot_utils.py
@@ -1,8 +1,6 @@
-# -*- coding: utf-8 -*-
# Copyright 2017 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
-
"""Utilities for launching and accessing ChromeOS buildbots."""
from __future__ import print_function
@@ -19,9 +17,9 @@ from cros_utils import logger
INITIAL_SLEEP_TIME = 7200 # 2 hours; wait time before polling buildbot.
SLEEP_TIME = 600 # 10 minutes; time between polling of buildbot.
-# Some of our slower builders (llvm-next) are taking more
-# than 11 hours. So, increase this TIME_OUT to 12 hours.
-TIME_OUT = 43200 # Decide the build is dead or will never finish
+# Some of our slower builders (llvm-next) are taking more
+# than 8 hours. So, increase this TIME_OUT to 9 hours.
+TIME_OUT = 32400 # Decide the build is dead or will never finish
class BuildbotTimeout(Exception):
@@ -61,10 +59,6 @@ def PeekTrybotImage(chromeos_root, buildbucket_id):
results = json.loads(out)[buildbucket_id]
- # Handle the case where the tryjob failed to launch correctly.
- if results['artifacts_url'] is None:
- return (results['status'], '')
-
return (results['status'], results['artifacts_url'].rstrip('/'))
@@ -116,7 +110,7 @@ def SubmitTryjob(chromeos_root,
# Launch buildbot with appropriate flags.
build = buildbot_name
- command = ('cros_sdk -- cros tryjob --yes --json --nochromesdk %s %s %s' %
+ command = ('cros tryjob --yes --json --nochromesdk %s %s %s' %
(tryjob_flags, patch_arg, build))
print('CMD: %s' % command)
_, out, _ = RunCommandInPath(chromeos_root, command)
@@ -251,10 +245,5 @@ def GetLatestImage(chromeos_root, path):
candidates.sort(reverse=True)
for c in candidates:
build = '%s/R%d-%d.%d.%d' % (path, c[0], c[1], c[2], c[3])
- # Blacklist "R79-12384.0.0" image released by mistake.
- # TODO(crbug.com/992242): Remove the filter by 2019-09-05.
- if c == [79, 12384, 0, 0]:
- continue
-
if DoesImageExist(chromeos_root, build):
return build
diff --git a/cros_utils/buildbot_utils_unittest.py b/cros_utils/buildbot_utils_unittest.py
index bfba8d78..c57b2d32 100755
--- a/cros_utils/buildbot_utils_unittest.py
+++ b/cros_utils/buildbot_utils_unittest.py
@@ -1,10 +1,8 @@
#!/usr/bin/env python2
-# -*- coding: utf-8 -*-
-#
+
# Copyright 2018 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
-
"""Unittest for buildbot_utils.py."""
from __future__ import print_function
@@ -36,19 +34,9 @@ class TrybotTest(unittest.TestCase):
tryjob_out = (
'[{"buildbucket_id": "8952721143823688176", "build_config": '
'"cave-llvm-toolchain-tryjob", "url": '
- # pylint: disable=line-too-long
'"http://cros-goldeneye/chromeos/healthmonitoring/buildDetails?buildbucketId=8952721143823688176"}]'
)
- GSUTILS_LS = '\n'.join([
- 'gs://chromeos-image-archive/{0}/R78-12421.0.0/',
- 'gs://chromeos-image-archive/{0}/R78-12422.0.0/',
- 'gs://chromeos-image-archive/{0}/R78-12423.0.0/',
- # "R79-12384.0.0" image should be blacklisted.
- # TODO(crbug.com/992242): Remove the filter by 2019-09-05.
- 'gs://chromeos-image-archive/{0}/R79-12384.0.0/',
- ])
-
buildresult_out = (
'{"8952721143823688176": {"status": "pass", "artifacts_url":'
'"gs://chromeos-image-archive/trybot-elm-release-tryjob/R67-10468.0.0-'
@@ -124,26 +112,6 @@ class TrybotTest(unittest.TestCase):
buildbucket_id = buildbot_utils.ParseTryjobBuildbucketId(self.tryjob_out)
self.assertEqual(buildbucket_id, self.buildbucket_id)
- def testGetLatestImageValid(self):
- with patch.object(command_executer.CommandExecuter,
- 'ChrootRunCommandWOutput') as mocked_run:
- with patch.object(buildbot_utils, 'DoesImageExist') as mocked_imageexist:
- IMAGE_DIR = 'lulu-release'
- mocked_run.return_value = (0, self.GSUTILS_LS.format(IMAGE_DIR), '')
- mocked_imageexist.return_value = True
- image = buildbot_utils.GetLatestImage('', IMAGE_DIR)
- self.assertEqual(image, '{0}/R78-12423.0.0'.format(IMAGE_DIR))
-
- def testGetLatestImageInvalid(self):
- with patch.object(command_executer.CommandExecuter,
- 'ChrootRunCommandWOutput') as mocked_run:
- with patch.object(buildbot_utils, 'DoesImageExist') as mocked_imageexist:
- IMAGE_DIR = 'kefka-release'
- mocked_run.return_value = (0, self.GSUTILS_LS.format(IMAGE_DIR), '')
- mocked_imageexist.return_value = False
- image = buildbot_utils.GetLatestImage('', IMAGE_DIR)
- self.assertIsNone(image)
-
if __name__ == '__main__':
unittest.main()
diff --git a/cros_utils/command_executer.py b/cros_utils/command_executer.py
index 08e4e6ae..ae1b2962 100644
--- a/cros_utils/command_executer.py
+++ b/cros_utils/command_executer.py
@@ -1,8 +1,6 @@
-# -*- coding: utf-8 -*-
# Copyright 2011 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
-
"""Utilities to run commands in outside/inside chroot and on the board."""
from __future__ import print_function
@@ -68,7 +66,6 @@ class CommandExecuter(object):
command_timeout=None,
terminated_timeout=10,
print_to_console=True,
- env=None,
except_handler=lambda p, e: None):
"""Run a command.
@@ -107,8 +104,7 @@ class CommandExecuter(object):
stderr=subprocess.PIPE,
shell=True,
preexec_fn=os.setsid,
- executable='/bin/bash',
- env=env)
+ executable='/bin/bash')
full_stdout = ''
full_stderr = ''
@@ -128,9 +124,9 @@ class CommandExecuter(object):
if command_terminator and command_terminator.IsTerminated():
os.killpg(os.getpgid(p.pid), signal.SIGTERM)
if self.logger:
- self.logger.LogError(
- 'Command received termination request. '
- 'Killed child process group.', print_to_console)
+ self.logger.LogError('Command received termination request. '
+ 'Killed child process group.',
+ print_to_console)
break
l = my_poll.poll(100)
@@ -160,19 +156,18 @@ class CommandExecuter(object):
elif (terminated_timeout is not None and
time.time() - terminated_time > terminated_timeout):
if self.logger:
- self.logger.LogWarning(
- 'Timeout of %s seconds reached since '
- 'process termination.' % terminated_timeout, print_to_console)
+ self.logger.LogWarning('Timeout of %s seconds reached since '
+ 'process termination.' %
+ terminated_timeout, print_to_console)
break
if (command_timeout is not None and
time.time() - started_time > command_timeout):
os.killpg(os.getpgid(p.pid), signal.SIGTERM)
if self.logger:
- self.logger.LogWarning(
- 'Timeout of %s seconds reached since process'
- 'started. Killed child process group.' % command_timeout,
- print_to_console)
+ self.logger.LogWarning('Timeout of %s seconds reached since process'
+ 'started. Killed child process group.' %
+ command_timeout, print_to_console)
break
if out == err == '':
@@ -349,8 +344,7 @@ class CommandExecuter(object):
command_timeout=None,
terminated_timeout=10,
print_to_console=True,
- cros_sdk_options='',
- env=None):
+ cros_sdk_options=''):
"""Runs a command within the chroot.
Returns triplet (returncode, stdout, stderr).
@@ -377,9 +371,8 @@ class CommandExecuter(object):
# the chroot already exists. We want the final returned output to skip
# the output from chroot creation steps.
if return_output:
- ret = self.RunCommand(
- 'cd %s; cros_sdk %s -- true' % (chromeos_root, cros_sdk_options),
- env=env)
+ ret = self.RunCommand('cd %s; cros_sdk %s -- true' % (chromeos_root,
+ cros_sdk_options))
if ret:
return (ret, '', '')
@@ -394,8 +387,7 @@ class CommandExecuter(object):
command_terminator=command_terminator,
command_timeout=command_timeout,
terminated_timeout=terminated_timeout,
- print_to_console=print_to_console,
- env=env)
+ print_to_console=print_to_console)
os.remove(command_file)
return ret
@@ -457,15 +449,15 @@ class CommandExecuter(object):
src = src + '/'
dest = dest + '/'
- if src_cros or dest_cros:
+ if src_cros == True or dest_cros == True:
if self.logger:
- self.logger.LogFatalIf(
- src_cros == dest_cros, 'Only one of src_cros and desc_cros can '
- 'be True.')
+ self.logger.LogFatalIf(src_cros == dest_cros,
+ 'Only one of src_cros and desc_cros can '
+ 'be True.')
self.logger.LogFatalIf(not chromeos_root, 'chromeos_root not given!')
elif src_cros == dest_cros or not chromeos_root:
sys.exit(1)
- if src_cros:
+ if src_cros == True:
cros_machine = src_machine
else:
cros_machine = dest_machine
@@ -475,7 +467,7 @@ class CommandExecuter(object):
'ssh -p ${FLAGS_ssh_port}' + ' -o StrictHostKeyChecking=no' +
' -o UserKnownHostsFile=$(mktemp)' + ' -i $TMP_PRIVATE_KEY')
rsync_prefix = "\nrsync -r -e \"%s\" " % ssh_command
- if dest_cros:
+ if dest_cros == True:
command += rsync_prefix + '%s root@%s:%s' % (src, dest_machine, dest)
return self.RunCommand(
command,
@@ -662,7 +654,6 @@ class MockCommandExecuter(CommandExecuter):
command_timeout=None,
terminated_timeout=10,
print_to_console=True,
- env=None,
except_handler=lambda p, e: None):
assert not command_timeout
cmd = str(cmd)
diff --git a/cros_utils/command_executer_unittest.py b/cros_utils/command_executer_unittest.py
index 4da4a4ac..f039ebc5 100755
--- a/cros_utils/command_executer_unittest.py
+++ b/cros_utils/command_executer_unittest.py
@@ -1,6 +1,4 @@
#!/usr/bin/env python2
-# -*- coding: utf-8 -*-
-
"""Unittest for command_executer.py."""
from __future__ import print_function
diff --git a/cros_utils/contextlib3.py b/cros_utils/contextlib3.py
deleted file mode 100644
index 9fabbf6e..00000000
--- a/cros_utils/contextlib3.py
+++ /dev/null
@@ -1,116 +0,0 @@
-# -*- coding: utf-8 -*-
-# Copyright 2019 The Chromium OS Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-"""Random utilties from Python3's contextlib."""
-
-from __future__ import division
-from __future__ import print_function
-
-import sys
-
-
-class ExitStack(object):
- """https://docs.python.org/3/library/contextlib.html#contextlib.ExitStack"""
-
- def __init__(self):
- self._stack = []
- self._is_entered = False
-
- def _assert_is_entered(self):
- # Strictly, entering has no effect on the operations that call this.
- # However, if you're trying to e.g. push things to an ExitStack that hasn't
- # yet been entered, that's likely a bug.
- assert self._is_entered, 'ExitStack op performed before entering'
-
- def __enter__(self):
- self._is_entered = True
- return self
-
- def _perform_exit(self, exc_type, exc, exc_traceback):
- # I suppose a better name for this is
- # `take_exception_handling_into_our_own_hands`, but that's harder to type.
- exception_handled = False
- while self._stack:
- fn = self._stack.pop()
- # The except clause below is meant to run as-if it's a `finally` block,
- # but `finally` blocks don't have easy access to exceptions currently in
- # flight. Hence, we do need to catch things like KeyboardInterrupt,
- # SystemExit, ...
- # pylint: disable=bare-except
- try:
- # If an __exit__ handler returns a truthy value, we should assume that
- # it handled the exception appropriately. Otherwise, we need to keep it
- # with us. (PEP 343)
- if fn(exc_type, exc, exc_traceback):
- exc_type, exc, exc_traceback = None, None, None
- exception_handled = True
- except:
- # Python2 doesn't appear to have the notion of 'exception causes',
- # which is super unfortunate. In the case:
- #
- # @contextlib.contextmanager
- # def foo()
- # try:
- # yield
- # finally:
- # raise ValueError
- #
- # with foo():
- # assert False
- #
- # ...Python will only note the ValueError; nothing about the failing
- # assertion is printed.
- #
- # I guess on the bright side, that means we don't have to fiddle with
- # __cause__s/etc.
- exc_type, exc, exc_traceback = sys.exc_info()
- exception_handled = True
-
- if not exception_handled:
- return False
-
- # Something changed. We either need to raise for ourselves, or note that
- # the exception has been suppressed.
- if exc_type is not None:
- raise exc_type, exc, exc_traceback
-
- # Otherwise, the exception was suppressed. Go us!
- return True
-
- def __exit__(self, exc_type, exc, exc_traceback):
- return self._perform_exit(exc_type, exc, exc_traceback)
-
- def close(self):
- """Unwinds the exit stack, unregistering all events"""
- self._perform_exit(None, None, None)
-
- def enter_context(self, cm):
- """Enters the given context manager, and registers it to be exited."""
- self._assert_is_entered()
-
- # The spec specifically notes that we should take __exit__ prior to calling
- # __enter__.
- exit_cleanup = cm.__exit__
- result = cm.__enter__()
- self._stack.append(exit_cleanup)
- return result
-
- # pylint complains about `exit` being redefined. `exit` is the documented
- # name of this param, and renaming it would break portability if someone
- # decided to `push(exit=foo)`, so just ignore the lint.
- # pylint: disable=redefined-builtin
- def push(self, exit):
- """Like `enter_context`, but won't enter the value given."""
- self._assert_is_entered()
- self._stack.append(exit.__exit__)
-
- def callback(self, callback, *args, **kwargs):
- """Performs the given callback on exit"""
- self._assert_is_entered()
-
- def fn(_exc_type, _exc, _exc_traceback):
- callback(*args, **kwargs)
-
- self._stack.append(fn)
diff --git a/cros_utils/contextlib3_test.py b/cros_utils/contextlib3_test.py
deleted file mode 100755
index 76c010f2..00000000
--- a/cros_utils/contextlib3_test.py
+++ /dev/null
@@ -1,195 +0,0 @@
-#!/usr/bin/env python2
-# -*- coding: utf-8 -*-
-# Copyright 2019 The Chromium OS Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-"""Tests for contextlib3"""
-
-from __future__ import division
-from __future__ import print_function
-
-import contextlib
-import unittest
-
-import contextlib3
-
-
-class SomeException(Exception):
- """Just an alternative to ValueError in the Exception class hierarchy."""
- pass
-
-
-class TestExitStack(unittest.TestCase):
- """Tests contextlib3.ExitStack"""
-
- def test_exceptions_in_exit_override_exceptions_in_with(self):
-
- @contextlib.contextmanager
- def raise_exit():
- raised = False
- try:
- yield
- except Exception:
- raised = True
- raise ValueError
- finally:
- self.assertTrue(raised)
-
- # (As noted in comments in contextlib3, this behavior is consistent with
- # how python2 works. Namely, if __exit__ raises, the exception from
- # __exit__ overrides the inner exception)
- with self.assertRaises(ValueError):
- with contextlib3.ExitStack() as stack:
- stack.enter_context(raise_exit())
- raise SomeException()
-
- def test_raising_in_exit_doesnt_block_later_exits(self):
- exited = []
-
- @contextlib.contextmanager
- def raise_exit():
- try:
- yield
- finally:
- exited.append('raise')
- raise ValueError
-
- @contextlib.contextmanager
- def push_exit():
- try:
- yield
- finally:
- exited.append('push')
-
- with self.assertRaises(ValueError):
- with contextlib3.ExitStack() as stack:
- stack.enter_context(push_exit())
- stack.enter_context(raise_exit())
- self.assertEqual(exited, ['raise', 'push'])
-
- exited = []
- with self.assertRaises(ValueError):
- with contextlib3.ExitStack() as stack:
- stack.enter_context(push_exit())
- stack.enter_context(raise_exit())
- raise SomeException()
- self.assertEqual(exited, ['raise', 'push'])
-
- def test_push_doesnt_enter_the_context(self):
- exited = []
-
- test_self = self
-
- class Manager(object):
- """A simple ContextManager for testing purposes"""
-
- def __enter__(self):
- test_self.fail('context manager was entered :(')
-
- def __exit__(self, *args, **kwargs):
- exited.append(1)
-
- with contextlib3.ExitStack() as stack:
- stack.push(Manager())
- self.assertEqual(exited, [])
- self.assertEqual(exited, [1])
-
- def test_callbacks_are_run_properly(self):
- callback_was_run = []
-
- def callback(arg, some_kwarg=None):
- self.assertEqual(arg, 41)
- self.assertEqual(some_kwarg, 42)
- callback_was_run.append(1)
-
- with contextlib3.ExitStack() as stack:
- stack.callback(callback, 41, some_kwarg=42)
- self.assertEqual(callback_was_run, [])
- self.assertEqual(callback_was_run, [1])
-
- callback_was_run = []
- with self.assertRaises(ValueError):
- with contextlib3.ExitStack() as stack:
- stack.callback(callback, 41, some_kwarg=42)
- raise ValueError()
- self.assertEqual(callback_was_run, [1])
-
- def test_finallys_are_run(self):
- finally_run = []
-
- @contextlib.contextmanager
- def append_on_exit():
- try:
- yield
- finally:
- finally_run.append(0)
-
- with self.assertRaises(ValueError):
- with contextlib3.ExitStack() as stack:
- stack.enter_context(append_on_exit())
- raise ValueError()
- self.assertEqual(finally_run, [0])
-
- def test_unwinding_happens_in_reverse_order(self):
- exit_runs = []
-
- @contextlib.contextmanager
- def append_things(start_push, end_push):
- exit_runs.append(start_push)
- try:
- yield
- finally:
- exit_runs.append(end_push)
-
- with contextlib3.ExitStack() as stack:
- stack.enter_context(append_things(1, 4))
- stack.enter_context(append_things(2, 3))
- self.assertEqual(exit_runs, [1, 2, 3, 4])
-
- exit_runs = []
- with self.assertRaises(ValueError):
- with contextlib3.ExitStack() as stack:
- stack.enter_context(append_things(1, 4))
- stack.enter_context(append_things(2, 3))
- raise ValueError
- self.assertEqual(exit_runs, [1, 2, 3, 4])
-
- def test_exceptions_are_propagated(self):
-
- @contextlib.contextmanager
- def die_on_regular_exit():
- yield
- self.fail('Unreachable in theory')
-
- with self.assertRaises(ValueError):
- with contextlib3.ExitStack() as stack:
- stack.enter_context(die_on_regular_exit())
- raise ValueError()
-
- def test_exceptions_can_be_blocked(self):
-
- @contextlib.contextmanager
- def block():
- try:
- yield
- except Exception:
- pass
-
- with contextlib3.ExitStack() as stack:
- stack.enter_context(block())
- raise ValueError()
-
- def test_objects_are_returned_from_enter_context(self):
-
- @contextlib.contextmanager
- def yield_arg(arg):
- yield arg
-
- with contextlib3.ExitStack() as stack:
- val = stack.enter_context(yield_arg(1))
- self.assertEqual(val, 1)
-
-
-if __name__ == '__main__':
- unittest.main()
diff --git a/cros_utils/email_sender.py b/cros_utils/email_sender.py
index 0d2bd651..e5a20ad2 100755
--- a/cros_utils/email_sender.py
+++ b/cros_utils/email_sender.py
@@ -9,7 +9,6 @@ from email import encoders as Encoders
from email.mime.base import MIMEBase
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
-import getpass
import os
import smtplib
import tempfile
@@ -87,7 +86,7 @@ class EmailSender(object):
ce = command_executer.GetCommandExecuter(log_level='none')
if not email_from:
- email_from = getpass.getuser() + '@google.com'
+ email_from = os.path.basename(__file__)
to_list = ','.join(email_to)
@@ -107,14 +106,13 @@ class EmailSender(object):
subject = subject.replace("'", "'\\''")
if msg_type == 'html':
- command = ("sendgmr --to='%s' --from='%s' --subject='%s' "
- "--html_file='%s' --body_file=/dev/null" %
- (to_list, email_from, subject, body_filename))
+ command = ("sendgmr --to='%s' --subject='%s' --html_file='%s' "
+ '--body_file=/dev/null' % (to_list, subject, body_filename))
else:
- command = (
- "sendgmr --to='%s' --from='%s' --subject='%s' "
- "--body_file='%s'" % (to_list, email_from, subject, body_filename))
-
+ command = ("sendgmr --to='%s' --subject='%s' --body_file='%s'" %
+ (to_list, subject, body_filename))
+ if email_from:
+ command += ' --from=%s' % email_from
if email_cc:
cc_list = ','.join(email_cc)
command += " --cc='%s'" % cc_list
diff --git a/cros_utils/locks.py b/cros_utils/locks.py
index 4ecbe0a9..cb96368e 100644
--- a/cros_utils/locks.py
+++ b/cros_utils/locks.py
@@ -1,29 +1,24 @@
-# -*- coding: utf-8 -*-
-#
-# Copyright 2019 The Chromium OS Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
+# Copyright 2015 The Chromium OS Authors. All rights reserved.
"""Utilities for locking machines."""
from __future__ import print_function
import time
-import lock_machine
+import afe_lock_machine
import logger
def AcquireLock(machines, chromeos_root, timeout=1200):
- """Acquire lock for machine(s) with timeout."""
+ """Acquire lock for machine(s) with timeout, using AFE server for locking."""
start_time = time.time()
locked = True
sleep_time = min(10, timeout / 10.0)
while True:
try:
- lock_machine.LockManager(machines, False,
- chromeos_root).UpdateMachines(True)
+ afe_lock_machine.AFELockManager(machines, False, chromeos_root,
+ None).UpdateMachines(True)
break
except Exception as e:
if time.time() - start_time > timeout:
@@ -37,13 +32,13 @@ def AcquireLock(machines, chromeos_root, timeout=1200):
def ReleaseLock(machines, chromeos_root):
- """Release locked machine(s)."""
+ """Release locked machine(s), using AFE server for locking."""
unlocked = True
try:
- lock_machine.LockManager(machines, False,
- chromeos_root).UpdateMachines(False)
+ afe_lock_machine.AFELockManager(machines, False, chromeos_root,
+ None).UpdateMachines(False)
except Exception as e:
unlocked = False
- logger.GetLogger().LogWarning(
- 'Could not unlock %s. %s' % (repr(machines), str(e)))
+ logger.GetLogger().LogWarning('Could not unlock %s. %s' %
+ (repr(machines), str(e)))
return unlocked
diff --git a/cros_utils/misc.py b/cros_utils/misc.py
index 58076f40..f9034b89 100644
--- a/cros_utils/misc.py
+++ b/cros_utils/misc.py
@@ -161,11 +161,19 @@ def GetBuildImageCommand(board, dev=False):
def GetSetupBoardCommand(board,
+ gcc_version=None,
+ binutils_version=None,
usepkg=None,
force=None):
"""Get setup_board command."""
options = []
+ if gcc_version:
+ options.append('--gcc_version=%s' % gcc_version)
+
+ if binutils_version:
+ options.append('--binutils_version=%s' % binutils_version)
+
if usepkg:
options.append('--usepkg')
else:
@@ -174,9 +182,10 @@ def GetSetupBoardCommand(board,
if force:
options.append('--force')
- options.append('--accept-licenses=@CHROMEOS')
+ options.append('--accept_licenses=@CHROMEOS')
- return 'setup_board --board=%s %s' % (board, ' '.join(options))
+ return ('%s/setup_board --board=%s %s' % (CHROMEOS_SCRIPTS_DIR, board,
+ ' '.join(options)))
def CanonicalizePath(path):
diff --git a/cros_utils/no_pseudo_terminal_test.py b/cros_utils/no_pseudo_terminal_test.py
index 41d71539..43eabb13 100755..100644
--- a/cros_utils/no_pseudo_terminal_test.py
+++ b/cros_utils/no_pseudo_terminal_test.py
@@ -1,10 +1,3 @@
-#!/usr/bin/env python2
-# -*- coding: utf-8 -*-
-#
-# Copyright 2019 The Chromium OS Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
"""Test to ensure we're not touching /dev/ptmx when running commands."""
from __future__ import print_function
@@ -25,9 +18,9 @@ class NoPsuedoTerminalTest(unittest.TestCase):
def _AttachStraceToSelf(self, output_file):
"""Attaches strace to the current process."""
- args = ['sudo', 'strace', '-o', output_file, '-p', str(os.getpid())]
+ args = ['strace', '-o', output_file, '-p', str(os.getpid())]
print(args)
- self._strace_process = subprocess.Popen(args, preexec_fn=os.setpgrp)
+ self._strace_process = subprocess.Popen(args)
# Wait until we see some activity.
start_time = time.time()
while time.time() - start_time < self.STRACE_TIMEOUT:
@@ -38,12 +31,9 @@ class NoPsuedoTerminalTest(unittest.TestCase):
def _KillStraceProcess(self):
"""Kills strace that was started by _AttachStraceToSelf()."""
- pgid = os.getpgid(self._strace_process.pid)
- args = ['sudo', 'kill', str(pgid)]
- if subprocess.call(args) == 0:
- os.waitpid(pgid, 0)
- return True
- return False
+ self._strace_process.terminate()
+ self._strace_process.wait()
+ return True
def testNoPseudoTerminalWhenRunningCommand(self):
"""Test to make sure we're not touching /dev/ptmx when running commands."""
diff --git a/cros_utils/tabulator.py b/cros_utils/tabulator.py
index 59e4d426..6936d35f 100644
--- a/cros_utils/tabulator.py
+++ b/cros_utils/tabulator.py
@@ -1,8 +1,6 @@
-# -*- coding: utf-8 -*-
# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
-
"""Table generating, analyzing and printing functions.
This defines several classes that are used to generate, analyze and print
@@ -88,17 +86,6 @@ def _StripNone(results):
return res
-def _RemoveMinMax(cell, values):
- if len(values) < 3:
- print('WARNING: Values count is less than 3, not ignoring min/max values')
- print('WARNING: Cell name:', cell.name, 'Values:', values)
- return values
-
- values.remove(min(values))
- values.remove(max(values))
- return values
-
-
class TableGenerator(object):
"""Creates a table from a list of list of dicts.
@@ -165,7 +152,17 @@ class TableGenerator(object):
def GetTable(self, number_of_rows=sys.maxint):
"""Returns a table from a list of list of dicts.
- Examples:
+ The list of list of dicts is passed into the constructor of TableGenerator.
+ This method converts that into a canonical list of lists which represents a
+ table of values.
+
+ Args:
+ number_of_rows: Maximum number of rows to return from the table.
+
+ Returns:
+ A list of lists which is the table.
+
+ Example:
We have the following runs:
[[{"k1": "v1", "k2": "v2"}, {"k1": "v3"}],
[{"k1": "v4", "k4": "v5"}]]
@@ -178,16 +175,6 @@ class TableGenerator(object):
["k4", [], ["v5"]]]
The returned table can then be processed further by other classes in this
module.
-
- The list of list of dicts is passed into the constructor of TableGenerator.
- This method converts that into a canonical list of lists which represents a
- table of values.
-
- Args:
- number_of_rows: Maximum number of rows to return from the table.
-
- Returns:
- A list of lists which is the table.
"""
keys = self._GetKeys()
header = [self._key_name] + self._labels
@@ -200,7 +187,7 @@ class TableGenerator(object):
v = []
for run in run_list:
if k in run:
- if isinstance(run[k], list):
+ if type(run[k]) is list:
val = run[k][0]
unit = run[k][1]
else:
@@ -220,192 +207,6 @@ class TableGenerator(object):
return table
-class SamplesTableGenerator(TableGenerator):
- """Creates a table with only samples from the results
-
- The main public function is called GetTable().
-
- Different than TableGenerator, self._runs is now a dict of {benchmark: runs}
- We are expecting there is 'samples' in `runs`.
- """
-
- def __init__(self, run_keyvals, label_list, iter_counts, weights):
- TableGenerator.__init__(
- self, run_keyvals, label_list, key_name='Benchmarks')
- self._iter_counts = iter_counts
- self._weights = weights
-
- def _GetKeys(self):
- keys = self._runs.keys()
- return self._SortKeys(keys)
-
- def GetTable(self, number_of_rows=sys.maxint):
- """Returns a tuple, which contains three args:
-
- 1) a table from a list of list of dicts.
- 2) updated benchmark_results run_keyvals with composite benchmark
- 3) updated benchmark_results iter_count with composite benchmark
-
- The dict of list of list of dicts is passed into the constructor of
- SamplesTableGenerator.
- This method converts that into a canonical list of lists which
- represents a table of values.
-
- Examples:
- We have the following runs:
- {bench1: [[{"samples": "v1"}, {"samples": "v2"}],
- [{"samples": "v3"}, {"samples": "v4"}]]
- bench2: [[{"samples": "v21"}, None],
- [{"samples": "v22"}, {"samples": "v23"}]]}
- and weights of benchmarks:
- {bench1: w1, bench2: w2}
- and the following labels:
- ["vanilla", "modified"]
- it will return:
- [["Benchmark", "Weights", "vanilla", "modified"]
- ["bench1", w1,
- ((2, 0), ["v1*w1", "v2*w1"]), ((2, 0), ["v3*w1", "v4*w1"])]
- ["bench2", w2,
- ((1, 1), ["v21*w2", None]), ((2, 0), ["v22*w2", "v23*w2"])]
- ["Composite Benchmark", N/A,
- ((1, 1), ["v1*w1+v21*w2", None]),
- ((2, 0), ["v3*w1+v22*w2", "v4*w1+ v23*w2"])]]
- The returned table can then be processed further by other classes in this
- module.
-
- Args:
- number_of_rows: Maximum number of rows to return from the table.
-
- Returns:
- A list of lists which is the table.
- """
- keys = self._GetKeys()
- header = [self._key_name, 'Weights'] + self._labels
- table = [header]
- rows = 0
- iterations = 0
-
- for k in keys:
- bench_runs = self._runs[k]
- unit = None
- all_runs_empty = all(not dict for label in bench_runs for dict in label)
- if all_runs_empty:
- cell = Cell()
- cell.string_value = 'Benchmark %s contains no result.' + \
- ' Is the benchmark name valid?' % k
- table.append([cell])
- else:
- row = [k]
- row.append(self._weights[k])
- for run_list in bench_runs:
- run_pass = 0
- run_fail = 0
- v = []
- for run in run_list:
- if 'samples' in run:
- if isinstance(run['samples'], list):
- val = run['samples'][0] * self._weights[k]
- unit = run['samples'][1]
- else:
- val = run['samples'] * self._weights[k]
- v.append(val)
- run_pass += 1
- else:
- v.append(None)
- run_fail += 1
- one_tuple = ((run_pass, run_fail), v)
- if iterations != 0 and iterations != run_pass + run_fail:
- raise ValueError('Iterations of each benchmark run ' \
- 'are not the same')
- iterations = run_pass + run_fail
- row.append(one_tuple)
- if unit:
- keyname = row[0] + ' (%s) ' % unit
- row[0] = keyname
- table.append(row)
- rows += 1
- if rows == number_of_rows:
- break
-
- k = 'Composite Benchmark'
- if k in keys:
- raise RuntimeError('Composite benchmark already exists in results')
-
- # Create a new composite benchmark row at the bottom of the summary table
- # The new row will be like the format in example:
- # ["Composite Benchmark", N/A,
- # ((1, 1), ["v1*w1+v21*w2", None]),
- # ((2, 0), ["v3*w1+v22*w2", "v4*w1+ v23*w2"])]]
- # First we will create a row of [key, weight, [[0] * iterations] * labels]
- row = [None] * len(header)
- row[0] = '%s (samples)' % k
- row[1] = 'N/A'
- for label_index in range(2, len(row)):
- row[label_index] = [0] * iterations
-
- for cur_row in table[1:]:
- # Iterate through each benchmark
- if len(cur_row) > 1:
- for label_index in range(2, len(cur_row)):
- # Iterate through each run in a single benchmark
- # each result should look like ((pass, fail), [values_list])
- bench_runs = cur_row[label_index][1]
- for index in range(iterations):
- # Accumulate each run result to composite benchmark run
- # If any run fails, then we set this run for composite benchmark
- # to None so that we know it fails.
- if bench_runs[index] and row[label_index][index] != None:
- row[label_index][index] += bench_runs[index]
- else:
- row[label_index][index] = None
- else:
- # One benchmark totally fails, no valid data will be in final result
- for label_index in range(2, len(row)):
- row[label_index] = [None] * iterations
- break
- # Calculate pass and fail count for composite benchmark
- for label_index in range(2, len(row)):
- run_pass = 0
- run_fail = 0
- for run in row[label_index]:
- if run:
- run_pass += 1
- else:
- run_fail += 1
- row[label_index] = ((run_pass, run_fail), row[label_index])
- table.append(row)
-
- # Now that we have the table genearted, we want to store this new composite
- # benchmark into the benchmark_result in ResultReport object.
- # This will be used to generate a full table which contains our composite
- # benchmark.
- # We need to create composite benchmark result and add it to keyvals in
- # benchmark_results.
- v = []
- for label in row[2:]:
- # each label's result looks like ((pass, fail), [values])
- benchmark_runs = label[1]
- # List of values of each label
- single_run_list = []
- for run in benchmark_runs:
- # Result of each run under the same label is a dict of keys.
- # Here the only key we will add for composite benchmark is the
- # weighted_samples we added up.
- one_dict = {}
- if run:
- one_dict[u'weighted_samples'] = [run, u'samples']
- one_dict['retval'] = 0
- else:
- one_dict['retval'] = 1
- single_run_list.append(one_dict)
- v.append(single_run_list)
-
- self._runs[k] = v
- self._iter_counts[k] = iterations
-
- return (table, self._runs, self._iter_counts)
-
-
class Result(object):
"""A class that respresents a single result.
@@ -522,8 +323,8 @@ class NonEmptyCountResult(Result):
len_values = len(values)
len_baseline_values = len(baseline_values)
tmp_cell = Cell()
- tmp_cell.value = 1.0 + (
- float(cell.value - base_value) / (max(len_values, len_baseline_values)))
+ tmp_cell.value = 1.0 + (float(cell.value - base_value) /
+ (max(len_values, len_baseline_values)))
f.Compute(tmp_cell)
cell.bgcolor = tmp_cell.bgcolor
@@ -541,13 +342,7 @@ class StringMeanResult(Result):
class AmeanResult(StringMeanResult):
"""Arithmetic mean."""
- def __init__(self, ignore_min_max=False):
- super(AmeanResult, self).__init__()
- self.ignore_min_max = ignore_min_max
-
def _ComputeFloat(self, cell, values, baseline_values):
- if self.ignore_min_max:
- values = _RemoveMinMax(cell, values)
cell.value = numpy.mean(values)
@@ -556,11 +351,6 @@ class RawResult(Result):
pass
-class IterationResult(Result):
- """Iteration result."""
- pass
-
-
class MinResult(Result):
"""Minimum."""
@@ -597,26 +387,14 @@ class NumericalResult(Result):
class StdResult(NumericalResult):
"""Standard deviation."""
- def __init__(self, ignore_min_max=False):
- super(StdResult, self).__init__()
- self.ignore_min_max = ignore_min_max
-
def _ComputeFloat(self, cell, values, baseline_values):
- if self.ignore_min_max:
- values = _RemoveMinMax(cell, values)
cell.value = numpy.std(values)
class CoeffVarResult(NumericalResult):
"""Standard deviation / Mean"""
- def __init__(self, ignore_min_max=False):
- super(CoeffVarResult, self).__init__()
- self.ignore_min_max = ignore_min_max
-
def _ComputeFloat(self, cell, values, baseline_values):
- if self.ignore_min_max:
- values = _RemoveMinMax(cell, values)
if numpy.mean(values) != 0.0:
noise = numpy.abs(numpy.std(values) / numpy.mean(values))
else:
@@ -649,14 +427,7 @@ class ComparisonResult(Result):
class PValueResult(ComparisonResult):
"""P-value."""
- def __init__(self, ignore_min_max=False):
- super(PValueResult, self).__init__()
- self.ignore_min_max = ignore_min_max
-
def _ComputeFloat(self, cell, values, baseline_values):
- if self.ignore_min_max:
- values = _RemoveMinMax(cell, values)
- baseline_values = _RemoveMinMax(cell, baseline_values)
if len(values) < 2 or len(baseline_values) < 2:
cell.value = float('nan')
return
@@ -671,13 +442,6 @@ class KeyAwareComparisonResult(ComparisonResult):
"""Automatic key aware comparison."""
def _IsLowerBetter(self, key):
- # Units in histograms should include directions
- if 'smallerIsBetter' in key:
- return True
- if 'biggerIsBetter' in key:
- return False
-
- # For units in chartjson:
# TODO(llozano): Trying to guess direction by looking at the name of the
# test does not seem like a good idea. Test frameworks should provide this
# info explicitly. I believe Telemetry has this info. Need to find it out.
@@ -704,7 +468,7 @@ class KeyAwareComparisonResult(ComparisonResult):
'dropped_percent', '(ms)', '(seconds)', '--ms',
'--average_num_missing_tiles', '--experimental_jank',
'--experimental_mean_frame', '--experimental_median_frame_time',
- '--total_deferred_image_decode_count', '--seconds', 'samples'
+ '--total_deferred_image_decode_count', '--seconds'
]
return any([l in key for l in lower_is_better_keys])
@@ -718,14 +482,7 @@ class KeyAwareComparisonResult(ComparisonResult):
class AmeanRatioResult(KeyAwareComparisonResult):
"""Ratio of arithmetic means of values vs. baseline values."""
- def __init__(self, ignore_min_max=False):
- super(AmeanRatioResult, self).__init__()
- self.ignore_min_max = ignore_min_max
-
def _ComputeFloat(self, cell, values, baseline_values):
- if self.ignore_min_max:
- values = _RemoveMinMax(cell, values)
- baseline_values = _RemoveMinMax(cell, baseline_values)
if numpy.mean(baseline_values) != 0:
cell.value = numpy.mean(values) / numpy.mean(baseline_values)
elif numpy.mean(values) != 0:
@@ -739,14 +496,7 @@ class AmeanRatioResult(KeyAwareComparisonResult):
class GmeanRatioResult(KeyAwareComparisonResult):
"""Ratio of geometric means of values vs. baseline values."""
- def __init__(self, ignore_min_max=False):
- super(GmeanRatioResult, self).__init__()
- self.ignore_min_max = ignore_min_max
-
def _ComputeFloat(self, cell, values, baseline_values):
- if self.ignore_min_max:
- values = _RemoveMinMax(cell, values)
- baseline_values = _RemoveMinMax(cell, baseline_values)
if self._GetGmean(baseline_values) != 0:
cell.value = self._GetGmean(values) / self._GetGmean(baseline_values)
elif self._GetGmean(values) != 0:
@@ -867,13 +617,6 @@ class PValueFormat(Format):
power=1)
-class WeightFormat(Format):
- """Formatting for weight in cwp mode."""
-
- def _ComputeFloat(self, cell):
- cell.string_value = '%0.4f' % float(cell.value)
-
-
class StorageFormat(Format):
"""Format the cell as a storage number.
@@ -923,7 +666,8 @@ class PercentFormat(Format):
def _ComputeFloat(self, cell):
cell.string_value = '%+1.1f%%' % ((float(cell.value) - 1) * 100)
- cell.color = self._GetColor(cell.value, Color(255, 0, 0, 0),
+ cell.color = self._GetColor(cell.value,
+ Color(255, 0, 0, 0),
Color(0, 0, 0, 0), Color(0, 255, 0, 0))
@@ -936,7 +680,8 @@ class RatioFormat(Format):
def _ComputeFloat(self, cell):
cell.string_value = '%+1.1f%%' % ((cell.value - 1) * 100)
- cell.color = self._GetColor(cell.value, Color(255, 0, 0, 0),
+ cell.color = self._GetColor(cell.value,
+ Color(255, 0, 0, 0),
Color(0, 0, 0, 0), Color(0, 255, 0, 0))
@@ -952,7 +697,8 @@ class ColorBoxFormat(Format):
def _ComputeFloat(self, cell):
cell.string_value = '--'
- bgcolor = self._GetColor(cell.value, Color(255, 0, 0, 0),
+ bgcolor = self._GetColor(cell.value,
+ Color(255, 0, 0, 0),
Color(255, 255, 255, 0), Color(0, 255, 0, 0))
cell.bgcolor = bgcolor
cell.color = bgcolor
@@ -1031,19 +777,15 @@ class TableFormatter(object):
formats to apply to the table and returns a table of cells.
"""
- def __init__(self, table, columns, samples_table=False):
+ def __init__(self, table, columns):
"""The constructor takes in a table and a list of columns.
Args:
table: A list of lists of values.
- columns: A list of column containing what to produce and how to format
- it.
- samples_table: A flag to check whether we are generating a table of
- samples in CWP apporximation mode.
+ columns: A list of column containing what to produce and how to format it.
"""
self._table = table
self._columns = columns
- self._samples_table = samples_table
self._table_columns = []
self._out_table = []
@@ -1052,48 +794,30 @@ class TableFormatter(object):
all_failed = False
for row in self._table[1:]:
- # If we are generating samples_table, the second value will be weight
- # rather than values.
- start_col = 2 if self._samples_table else 1
# It does not make sense to put retval in the summary table.
if str(row[0]) == 'retval' and table_type == 'summary':
# Check to see if any runs passed, and update all_failed.
all_failed = True
- for values in row[start_col:]:
+ for values in row[1:]:
if 0 in values:
all_failed = False
continue
key = Cell()
key.string_value = str(row[0])
out_row = [key]
- if self._samples_table:
- # Add one column for weight if in samples_table mode
- weight = Cell()
- weight.value = row[1]
- f = WeightFormat()
- f.Compute(weight)
- out_row.append(weight)
baseline = None
- for results in row[start_col:]:
- column_start = 0
- values = None
- # If generating sample table, we will split a tuple of iterations info
- # from the results
- if isinstance(results, tuple):
- it, values = results
- column_start = 1
- cell = Cell()
- cell.string_value = '[%d: %d]' % (it[0], it[1])
- out_row.append(cell)
- if not row_index:
- self._table_columns.append(self._columns[0])
- else:
- values = results
- # Parse each column
- for column in self._columns[column_start:]:
+ for values in row[1:]:
+ for column in self._columns:
cell = Cell()
cell.name = key.string_value
- if not column.result.NeedsBaseline() or baseline is not None:
+ if column.result.NeedsBaseline():
+ if baseline is not None:
+ column.result.Compute(cell, values, baseline)
+ column.fmt.Compute(cell)
+ out_row.append(cell)
+ if not row_index:
+ self._table_columns.append(column)
+ else:
column.result.Compute(cell, values, baseline)
column.fmt.Compute(cell)
out_row.append(cell)
@@ -1129,13 +853,8 @@ class TableFormatter(object):
"""Generate Column name at the top of table."""
key = Cell()
key.header = True
- key.string_value = 'Keys' if not self._samples_table else 'Benchmarks'
+ key.string_value = 'Keys'
header = [key]
- if self._samples_table:
- weight = Cell()
- weight.header = True
- weight.string_value = 'Weights'
- header.append(weight)
for column in self._table_columns:
cell = Cell()
cell.header = True
@@ -1196,7 +915,7 @@ class TableFormatter(object):
# Put the number of pass/fail iterations in the image label header.
if column_position > 0 and retval_row:
retval_values = retval_row[column_position]
- if isinstance(retval_values, list):
+ if type(retval_values) is list:
passes, fails = self.GetPassesAndFails(retval_values)
cell.string_value = str(label) + ' (pass:%d fail:%d)' % (passes,
fails)
@@ -1205,13 +924,9 @@ class TableFormatter(object):
else:
cell.string_value = str(label)
if top_header:
- if not self._samples_table or (self._samples_table and
- len(top_header) == 2):
- cell.colspan = base_colspan
+ cell.colspan = base_colspan
if len(top_header) > 1:
- if not self._samples_table or (self._samples_table and
- len(top_header) > 2):
- cell.colspan = compare_colspan
+ cell.colspan = compare_colspan
top_header.append(cell)
column_position = column_position + 1
self._out_table = [top_header] + self._out_table
@@ -1420,12 +1135,8 @@ class TablePrinter(object):
def GetSimpleTable(table, out_to=TablePrinter.CONSOLE):
"""Prints a simple table.
- This is used by code that has a very simple list-of-lists and wants to
- produce a table with ameans, a percentage ratio of ameans and a colorbox.
-
- Examples:
- GetSimpleConsoleTable([["binary", "b1", "b2"],["size", "300", "400"]])
- will produce a colored table that can be printed to the console.
+ This is used by code that has a very simple list-of-lists and wants to produce
+ a table with ameans, a percentage ratio of ameans and a colorbox.
Args:
table: a list of lists.
@@ -1433,6 +1144,10 @@ def GetSimpleTable(table, out_to=TablePrinter.CONSOLE):
Returns:
A string version of the table that can be printed to the console.
+
+ Example:
+ GetSimpleConsoleTable([["binary", "b1", "b2"],["size", "300", "400"]])
+ will produce a colored table that can be printed to the console.
"""
columns = [
Column(AmeanResult(), Format()),
@@ -1470,15 +1185,15 @@ def GetComplexTable(runs, labels, out_to=TablePrinter.CONSOLE):
tg = TableGenerator(runs, labels, TableGenerator.SORT_BY_VALUES_DESC)
table = tg.GetTable()
columns = [
- Column(LiteralResult(), Format(), 'Literal'),
- Column(AmeanResult(), Format()),
- Column(StdResult(), Format()),
- Column(CoeffVarResult(), CoeffVarFormat()),
- Column(NonEmptyCountResult(), Format()),
- Column(AmeanRatioResult(), PercentFormat()),
- Column(AmeanRatioResult(), RatioFormat()),
- Column(GmeanRatioResult(), RatioFormat()),
- Column(PValueResult(), PValueFormat())
+ Column(LiteralResult(), Format(), 'Literal'), Column(
+ AmeanResult(), Format()), Column(StdResult(), Format()), Column(
+ CoeffVarResult(), CoeffVarFormat()), Column(
+ NonEmptyCountResult(), Format()),
+ Column(AmeanRatioResult(), PercentFormat()), Column(
+ AmeanRatioResult(), RatioFormat()), Column(GmeanRatioResult(),
+ RatioFormat()), Column(
+ PValueResult(),
+ PValueFormat())
]
tf = TableFormatter(table, columns)
cell_table = tf.GetCellTable()
@@ -1498,29 +1213,27 @@ if __name__ == '__main__':
'k8': 'PASS',
'k9': 'PASS',
'k10': '0'
- },
- {
- 'k1': '13',
- 'k2': '14',
- 'k3': '15',
- 'ms_1': '10',
- 'k8': 'PASS',
- 'k9': 'FAIL',
- 'k10': '0'
- }],
- [{
- 'k1': '50',
- 'k2': '51',
- 'k3': '52',
- 'k4': '53',
- 'k5': '35',
- 'k6': '45',
- 'ms_1': '200',
- 'ms_2': '20',
- 'k7': 'FAIL',
- 'k8': 'PASS',
- 'k9': 'PASS'
- }]]
+ }, {
+ 'k1': '13',
+ 'k2': '14',
+ 'k3': '15',
+ 'ms_1': '10',
+ 'k8': 'PASS',
+ 'k9': 'FAIL',
+ 'k10': '0'
+ }], [{
+ 'k1': '50',
+ 'k2': '51',
+ 'k3': '52',
+ 'k4': '53',
+ 'k5': '35',
+ 'k6': '45',
+ 'ms_1': '200',
+ 'ms_2': '20',
+ 'k7': 'FAIL',
+ 'k8': 'PASS',
+ 'k9': 'PASS'
+ }]]
labels = ['vanilla', 'modified']
t = GetComplexTable(runs, labels, TablePrinter.CONSOLE)
print(t)
diff --git a/cros_utils/tabulator_test.py b/cros_utils/tabulator_test.py
index 33c8da25..21cd1e73 100644
--- a/cros_utils/tabulator_test.py
+++ b/cros_utils/tabulator_test.py
@@ -1,7 +1,4 @@
-# -*- coding: utf-8 -*-
-# Copyright (c) 2012 The Chromium OS Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
+# Copyright 2012 Google Inc. All Rights Reserved.
"""Tests for the tabulator module."""
from __future__ import print_function
@@ -73,22 +70,14 @@ class TabulatorTest(unittest.TestCase):
b = tabulator.Result()._GetGmean(a)
self.assertTrue(b >= 0.99e+308 and b <= 1.01e+308)
- def testIgnoreMinMax(self):
- amr = tabulator.AmeanResult(ignore_min_max=True)
- cell = tabulator.Cell()
- values = [1, 2]
- amr.Compute(cell, values, None)
- self.assertTrue(cell.value == 1.5)
- values = [1, 2, 8]
- amr.Compute(cell, values, None)
- self.assertTrue(cell.value == 2)
-
def testTableGenerator(self):
- # yapf: disable
- runs = [[{'k1': '10', 'k2': '12'},
- {'k1': '13', 'k2': '14', 'k3': '15'}],
- [{'k1': '50', 'k2': '51', 'k3': '52', 'k4': '53'}]]
- # yapf: enable
+ runs = [[{'k1': '10',
+ 'k2': '12'}, {'k1': '13',
+ 'k2': '14',
+ 'k3': '15'}], [{'k1': '50',
+ 'k2': '51',
+ 'k3': '52',
+ 'k4': '53'}]]
labels = ['vanilla', 'modified']
tg = tabulator.TableGenerator(runs, labels)
table = tg.GetTable()
@@ -114,52 +103,6 @@ class TabulatorTest(unittest.TestCase):
table = tf.GetCellTable()
self.assertTrue(table)
- def testSamplesTableGenerator(self):
- # yapf: disable
- keyvals = {
- 'bench1': [[{'samples': 1}, {'samples': 2}],
- [{'samples': 3}, {'samples': 4}]],
- 'bench2': [[{'samples': 5}, {}],
- [{'samples': 6}, {'samples': 7}]]
- }
- # yapf: enable
- weights = {'bench1': 0.2, 'bench2': 0.7}
- iter_counts = {'bench1': 2, 'bench2': 2}
- labels = ['vanilla', 'modified']
- tg = tabulator.SamplesTableGenerator(keyvals, labels, iter_counts, weights)
- (table, new_keyvals, new_iter_counts) = tg.GetTable()
-
- columns = [
- tabulator.Column(tabulator.IterationResult(), tabulator.Format()),
- tabulator.Column(tabulator.AmeanResult(), tabulator.Format()),
- tabulator.Column(tabulator.AmeanRatioResult(),
- tabulator.PercentFormat()),
- ]
- # This is the function to load column info.
- tf = tabulator.TableFormatter(table, columns, samples_table=True)
- # This is the function where to do all weighting calculation.
- cell_table = tf.GetCellTable('summary')
- self.assertTrue(cell_table)
-
- header = table.pop(0)
- self.assertTrue(header == ['Benchmarks', 'Weights', 'vanilla', 'modified'])
- row = table.pop(0)
- # yapf: disable
- self.assertTrue(row == ['bench1', 0.2,
- ((2, 0), [1 * 0.2, 2 * 0.2]),
- ((2, 0), [3 * 0.2, 4 * 0.2])])
- row = table.pop(0)
- self.assertTrue(row == ['bench2', 0.7,
- ((1, 1), [5 * 0.7, None]),
- ((2, 0), [6 * 0.7, 7 * 0.7])])
- row = table.pop(0)
- self.assertTrue(row == ['Composite Benchmark (samples)', 'N/A',
- ((1, 1), [1 * 0.2 + 5 * 0.7, None]),
- ((2, 0), [3 * 0.2 + 6 * 0.7, 4 * 0.2 + 7 * 0.7])])
- # yapf: enable
- self.assertTrue('Composite Benchmark' in new_keyvals.keys())
- self.assertTrue('Composite Benchmark' in new_iter_counts.keys())
-
def testColspan(self):
simple_table = [
['binary', 'b1', 'b2', 'b3'],