Diffstat (limited to 'cros_utils')
-rw-r--r--  cros_utils/__init__.py | 2
-rwxr-xr-x  cros_utils/bugs.py | 168
-rwxr-xr-x  cros_utils/bugs_test.py | 211
-rw-r--r--  cros_utils/buildbot_utils.py | 485
-rwxr-xr-x  cros_utils/buildbot_utils_unittest.py | 384
-rwxr-xr-x  cros_utils/command_executer.py | 1419
-rwxr-xr-x  cros_utils/command_executer_timeout_test.py | 27
-rwxr-xr-x  cros_utils/command_executer_unittest.py | 35
-rw-r--r--  cros_utils/constants.py | 8
-rw-r--r--  cros_utils/device_setup_utils.py | 1112
-rwxr-xr-x  cros_utils/device_setup_utils_unittest.py | 1321
-rwxr-xr-x  cros_utils/email_sender.py | 513
-rwxr-xr-x  cros_utils/email_sender_unittest.py | 204
-rw-r--r--  cros_utils/file_utils.py | 140
-rw-r--r--  cros_utils/html_tools.py | 47
-rw-r--r--  cros_utils/locks.py | 67
-rw-r--r--  cros_utils/logger.py | 679
-rw-r--r--  cros_utils/machines.py | 29
-rw-r--r--  cros_utils/misc.py | 786
-rwxr-xr-x  cros_utils/misc_test.py | 89
-rwxr-xr-x  cros_utils/no_pseudo_terminal_test.py | 74
-rwxr-xr-x  cros_utils/perf_diff.py | 590
-rw-r--r--  cros_utils/tabulator.py | 2703
-rwxr-xr-x  cros_utils/tabulator_test.py | 3
-rw-r--r--  cros_utils/timeline.py | 78
-rwxr-xr-x  cros_utils/timeline_test.py | 91
-rw-r--r--  cros_utils/tiny_render.py | 171
-rwxr-xr-x  cros_utils/tiny_render_test.py | 341
-rw-r--r--  cros_utils/toolchain_utils.sh | 2
29 files changed, 6205 insertions, 5574 deletions
diff --git a/cros_utils/__init__.py b/cros_utils/__init__.py
index 4c4e5554..d365cb0c 100644
--- a/cros_utils/__init__.py
+++ b/cros_utils/__init__.py
@@ -1,4 +1,4 @@
# -*- coding: utf-8 -*-
-# Copyright 2019 The Chromium OS Authors. All rights reserved.
+# Copyright 2019 The ChromiumOS Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
diff --git a/cros_utils/bugs.py b/cros_utils/bugs.py
index 88fb7675..43e0e553 100755
--- a/cros_utils/bugs.py
+++ b/cros_utils/bugs.py
@@ -1,5 +1,5 @@
#!/usr/bin/env python3
-# Copyright 2021 The Chromium OS Authors. All rights reserved.
+# Copyright 2021 The ChromiumOS Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
@@ -12,93 +12,103 @@ import json
import os
from typing import Any, Dict, List, Optional
-X20_PATH = '/google/data/rw/teams/c-compiler-chrome/prod_bugs'
+
+X20_PATH = "/google/data/rw/teams/c-compiler-chrome/prod_bugs"
class WellKnownComponents(enum.IntEnum):
- """A listing of "well-known" components recognized by our infra."""
- CrOSToolchainPublic = -1
- CrOSToolchainPrivate = -2
+ """A listing of "well-known" components recognized by our infra."""
+
+ CrOSToolchainPublic = -1
+ CrOSToolchainPrivate = -2
def _WriteBugJSONFile(object_type: str, json_object: Dict[str, Any]):
- """Writes a JSON file to X20_PATH with the given bug-ish object."""
- final_object = {
- 'type': object_type,
- 'value': json_object,
- }
-
- # The name of this has two parts:
- # - An easily sortable time, to provide uniqueness and let our service send
- # things in the order they were put into the outbox.
- # - 64 bits of entropy, so two racing bug writes don't clobber the same file.
- now = datetime.datetime.utcnow().isoformat('T', 'seconds') + 'Z'
- entropy = base64.urlsafe_b64encode(os.getrandom(8))
- entropy_str = entropy.rstrip(b'=').decode('utf-8')
- file_path = os.path.join(X20_PATH, f'{now}_{entropy_str}.json')
-
- temp_path = file_path + '.in_progress'
- try:
- with open(temp_path, 'w') as f:
- json.dump(final_object, f)
- os.rename(temp_path, file_path)
- except:
- os.remove(temp_path)
- raise
- return file_path
+ """Writes a JSON file to X20_PATH with the given bug-ish object."""
+ final_object = {
+ "type": object_type,
+ "value": json_object,
+ }
+
+ # The name of this has two parts:
+ # - An easily sortable time, to provide uniqueness and let our service send
+ # things in the order they were put into the outbox.
+ # - 64 bits of entropy, so two racing bug writes don't clobber the same file.
+ now = datetime.datetime.utcnow().isoformat("T", "seconds") + "Z"
+ entropy = base64.urlsafe_b64encode(os.getrandom(8))
+ entropy_str = entropy.rstrip(b"=").decode("utf-8")
+ file_path = os.path.join(X20_PATH, f"{now}_{entropy_str}.json")
+
+ temp_path = file_path + ".in_progress"
+ try:
+ with open(temp_path, "w") as f:
+ json.dump(final_object, f)
+ os.rename(temp_path, file_path)
+ except:
+ os.remove(temp_path)
+ raise
+ return file_path
def AppendToExistingBug(bug_id: int, body: str):
- """Sends a reply to an existing bug."""
- _WriteBugJSONFile('AppendToExistingBugRequest', {
- 'body': body,
- 'bug_id': bug_id,
- })
-
-
-def CreateNewBug(component_id: int,
- title: str,
- body: str,
- assignee: Optional[str] = None,
- cc: Optional[List[str]] = None):
- """Sends a request to create a new bug.
-
- Args:
- component_id: The component ID to add. Anything from WellKnownComponents
- also works.
- title: Title of the bug. Must be nonempty.
- body: Body of the bug. Must be nonempty.
- assignee: Assignee of the bug. Must be either an email address, or a
- "well-known" assignee (detective, mage).
- cc: A list of emails to add to the CC list. Must either be an email
- address, or a "well-known" individual (detective, mage).
- """
- obj = {
- 'component_id': component_id,
- 'subject': title,
- 'body': body,
- }
-
- if assignee:
- obj['assignee'] = assignee
-
- if cc:
- obj['cc'] = cc
-
- _WriteBugJSONFile('FileNewBugRequest', obj)
+ """Sends a reply to an existing bug."""
+ _WriteBugJSONFile(
+ "AppendToExistingBugRequest",
+ {
+ "body": body,
+ "bug_id": bug_id,
+ },
+ )
+
+
+def CreateNewBug(
+ component_id: int,
+ title: str,
+ body: str,
+ assignee: Optional[str] = None,
+ cc: Optional[List[str]] = None,
+):
+ """Sends a request to create a new bug.
+
+ Args:
+ component_id: The component ID to add. Anything from WellKnownComponents
+ also works.
+ title: Title of the bug. Must be nonempty.
+ body: Body of the bug. Must be nonempty.
+ assignee: Assignee of the bug. Must be either an email address, or a
+ "well-known" assignee (detective, mage).
+ cc: A list of emails to add to the CC list. Must either be an email
+ address, or a "well-known" individual (detective, mage).
+ """
+ obj = {
+ "component_id": component_id,
+ "subject": title,
+ "body": body,
+ }
+
+ if assignee:
+ obj["assignee"] = assignee
+
+ if cc:
+ obj["cc"] = cc
+
+ _WriteBugJSONFile("FileNewBugRequest", obj)
def SendCronjobLog(cronjob_name: str, failed: bool, message: str):
- """Sends the record of a cronjob to our bug infra.
-
- cronjob_name: The name of the cronjob. Expected to remain consistent over
- time.
- failed: Whether the job failed or not.
- message: Any seemingly relevant context. This is pasted verbatim in a bug, if
- the cronjob infra deems it worthy.
- """
- _WriteBugJSONFile('ChrotomationCronjobUpdate', {
- 'name': cronjob_name,
- 'message': message,
- 'failed': failed,
- })
+ """Sends the record of a cronjob to our bug infra.
+
+ cronjob_name: The name of the cronjob. Expected to remain consistent over
+ time.
+ failed: Whether the job failed or not.
+ message: Any seemingly relevant context. This is pasted verbatim in a bug, if
+ the cronjob infra deems it worthy.
+ """
+ _WriteBugJSONFile(
+ "ChrotomationCronjobUpdate",
+ {
+ "name": cronjob_name,
+ "message": message,
+ "failed": failed,
+ },
+ )
diff --git a/cros_utils/bugs_test.py b/cros_utils/bugs_test.py
index 03dee64d..5a07dbd8 100755
--- a/cros_utils/bugs_test.py
+++ b/cros_utils/bugs_test.py
@@ -1,5 +1,5 @@
#!/usr/bin/env python3
-# Copyright 2021 The Chromium OS Authors. All rights reserved.
+# Copyright 2021 The ChromiumOS Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
@@ -17,108 +17,115 @@ import bugs
class Tests(unittest.TestCase):
- """Tests for the bugs module."""
- def testWritingJSONFileSeemsToWork(self):
- """Tests JSON file writing."""
- old_x20_path = bugs.X20_PATH
-
- def restore_x20_path():
- bugs.X20_PATH = old_x20_path
-
- self.addCleanup(restore_x20_path)
-
- with tempfile.TemporaryDirectory() as tempdir:
- bugs.X20_PATH = tempdir
- file_path = bugs._WriteBugJSONFile(
- 'ObjectType', {
- 'foo': 'bar',
- 'baz': bugs.WellKnownComponents.CrOSToolchainPublic,
- })
-
- self.assertTrue(file_path.startswith(tempdir),
- f'Expected {file_path} to start with {tempdir}')
-
- with open(file_path) as f:
- self.assertEqual(
- json.load(f),
- {
- 'type': 'ObjectType',
- 'value': {
- 'foo': 'bar',
- 'baz': int(bugs.WellKnownComponents.CrOSToolchainPublic),
+ """Tests for the bugs module."""
+
+ def testWritingJSONFileSeemsToWork(self):
+ """Tests JSON file writing."""
+ old_x20_path = bugs.X20_PATH
+
+ def restore_x20_path():
+ bugs.X20_PATH = old_x20_path
+
+ self.addCleanup(restore_x20_path)
+
+ with tempfile.TemporaryDirectory() as tempdir:
+ bugs.X20_PATH = tempdir
+ file_path = bugs._WriteBugJSONFile(
+ "ObjectType",
+ {
+ "foo": "bar",
+ "baz": bugs.WellKnownComponents.CrOSToolchainPublic,
},
+ )
+
+ self.assertTrue(
+ file_path.startswith(tempdir),
+ f"Expected {file_path} to start with {tempdir}",
+ )
+
+ with open(file_path) as f:
+ self.assertEqual(
+ json.load(f),
+ {
+ "type": "ObjectType",
+ "value": {
+ "foo": "bar",
+ "baz": int(
+ bugs.WellKnownComponents.CrOSToolchainPublic
+ ),
+ },
+ },
+ )
+
+ @patch("bugs._WriteBugJSONFile")
+ def testAppendingToBugsSeemsToWork(self, mock_write_json_file):
+ """Tests AppendToExistingBug."""
+ bugs.AppendToExistingBug(1234, "hello, world!")
+ mock_write_json_file.assert_called_once_with(
+ "AppendToExistingBugRequest",
+ {
+ "body": "hello, world!",
+ "bug_id": 1234,
},
)
- @patch('bugs._WriteBugJSONFile')
- def testAppendingToBugsSeemsToWork(self, mock_write_json_file):
- """Tests AppendToExistingBug."""
- bugs.AppendToExistingBug(1234, 'hello, world!')
- mock_write_json_file.assert_called_once_with(
- 'AppendToExistingBugRequest',
- {
- 'body': 'hello, world!',
- 'bug_id': 1234,
- },
- )
-
- @patch('bugs._WriteBugJSONFile')
- def testBugCreationSeemsToWork(self, mock_write_json_file):
- """Tests CreateNewBug."""
- test_case_additions = (
- {},
- {
- 'component_id': bugs.WellKnownComponents.CrOSToolchainPublic,
- },
- {
- 'assignee': 'foo@gbiv.com',
- 'cc': ['bar@baz.com'],
- },
- )
-
- for additions in test_case_additions:
- test_case = {
- 'component_id': 123,
- 'title': 'foo',
- 'body': 'bar',
- **additions,
- }
-
- bugs.CreateNewBug(**test_case)
-
- expected_output = {
- 'component_id': test_case['component_id'],
- 'subject': test_case['title'],
- 'body': test_case['body'],
- }
-
- assignee = test_case.get('assignee')
- if assignee:
- expected_output['assignee'] = assignee
-
- cc = test_case.get('cc')
- if cc:
- expected_output['cc'] = cc
-
- mock_write_json_file.assert_called_once_with(
- 'FileNewBugRequest',
- expected_output,
- )
- mock_write_json_file.reset_mock()
-
- @patch('bugs._WriteBugJSONFile')
- def testCronjobLogSendingSeemsToWork(self, mock_write_json_file):
- """Tests SendCronjobLog."""
- bugs.SendCronjobLog('my_name', False, 'hello, world!')
- mock_write_json_file.assert_called_once_with(
- 'ChrotomationCronjobUpdate',
- {
- 'name': 'my_name',
- 'message': 'hello, world!',
- 'failed': False,
- },
- )
-
-
-if __name__ == '__main__':
- unittest.main()
+ @patch("bugs._WriteBugJSONFile")
+ def testBugCreationSeemsToWork(self, mock_write_json_file):
+ """Tests CreateNewBug."""
+ test_case_additions = (
+ {},
+ {
+ "component_id": bugs.WellKnownComponents.CrOSToolchainPublic,
+ },
+ {
+ "assignee": "foo@gbiv.com",
+ "cc": ["bar@baz.com"],
+ },
+ )
+
+ for additions in test_case_additions:
+ test_case = {
+ "component_id": 123,
+ "title": "foo",
+ "body": "bar",
+ **additions,
+ }
+
+ bugs.CreateNewBug(**test_case)
+
+ expected_output = {
+ "component_id": test_case["component_id"],
+ "subject": test_case["title"],
+ "body": test_case["body"],
+ }
+
+ assignee = test_case.get("assignee")
+ if assignee:
+ expected_output["assignee"] = assignee
+
+ cc = test_case.get("cc")
+ if cc:
+ expected_output["cc"] = cc
+
+ mock_write_json_file.assert_called_once_with(
+ "FileNewBugRequest",
+ expected_output,
+ )
+ mock_write_json_file.reset_mock()
+
+ @patch("bugs._WriteBugJSONFile")
+ def testCronjobLogSendingSeemsToWork(self, mock_write_json_file):
+ """Tests SendCronjobLog."""
+ bugs.SendCronjobLog("my_name", False, "hello, world!")
+ mock_write_json_file.assert_called_once_with(
+ "ChrotomationCronjobUpdate",
+ {
+ "name": "my_name",
+ "message": "hello, world!",
+ "failed": False,
+ },
+ )
+
+
+if __name__ == "__main__":
+ unittest.main()
diff --git a/cros_utils/buildbot_utils.py b/cros_utils/buildbot_utils.py
index b600c6aa..8f0ce5e0 100644
--- a/cros_utils/buildbot_utils.py
+++ b/cros_utils/buildbot_utils.py
@@ -1,12 +1,10 @@
# -*- coding: utf-8 -*-
-# Copyright 2017 The Chromium OS Authors. All rights reserved.
+# Copyright 2017 The ChromiumOS Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Utilities for launching and accessing ChromeOS buildbots."""
-from __future__ import division
-from __future__ import print_function
import ast
import json
@@ -17,6 +15,7 @@ import time
from cros_utils import command_executer
from cros_utils import logger
+
INITIAL_SLEEP_TIME = 7200 # 2 hours; wait time before polling buildbot.
SLEEP_TIME = 600 # 10 minutes; time between polling of buildbot.
@@ -26,267 +25,283 @@ TIME_OUT = 15 * 60 * 60 # Decide the build is dead or will never finish
class BuildbotTimeout(Exception):
- """Exception to throw when a buildbot operation timesout."""
+ """Exception to throw when a buildbot operation timesout."""
def RunCommandInPath(path, cmd):
- ce = command_executer.GetCommandExecuter()
- cwd = os.getcwd()
- os.chdir(path)
- status, stdout, stderr = ce.RunCommandWOutput(cmd, print_to_console=False)
- os.chdir(cwd)
- return status, stdout, stderr
+ ce = command_executer.GetCommandExecuter()
+ cwd = os.getcwd()
+ os.chdir(path)
+ status, stdout, stderr = ce.RunCommandWOutput(cmd, print_to_console=False)
+ os.chdir(cwd)
+ return status, stdout, stderr
def PeekTrybotImage(chromeos_root, buildbucket_id):
- """Get the artifact URL of a given tryjob.
+ """Get the artifact URL of a given tryjob.
- Args:
- buildbucket_id: buildbucket-id
- chromeos_root: root dir of chrome os checkout
+ Args:
+ buildbucket_id: buildbucket-id
+ chromeos_root: root dir of chrome os checkout
- Returns:
- (status, url) where status can be 'pass', 'fail', 'running',
- and url looks like:
- gs://chromeos-image-archive/trybot-elm-release-tryjob/R67-10468.0.0-b20789
- """
- command = ('cros buildresult --report json --buildbucket-id %s' %
- buildbucket_id)
- rc, out, _ = RunCommandInPath(chromeos_root, command)
+ Returns:
+ (status, url) where status can be 'pass', 'fail', 'running',
+ and url looks like:
+ gs://chromeos-image-archive/trybot-elm-release-tryjob/R67-10468.0.0-b20789
+ """
+ command = (
+ "cros buildresult --report json --buildbucket-id %s" % buildbucket_id
+ )
+ rc, out, _ = RunCommandInPath(chromeos_root, command)
- # Current implementation of cros buildresult returns fail when a job is still
- # running.
- if rc != 0:
- return ('running', None)
+ # Current implementation of cros buildresult returns fail when a job is still
+ # running.
+ if rc != 0:
+ return ("running", None)
- results = json.loads(out)[buildbucket_id]
+ results = json.loads(out)[buildbucket_id]
- # Handle the case where the tryjob failed to launch correctly.
- if results['artifacts_url'] is None:
- return (results['status'], '')
+ # Handle the case where the tryjob failed to launch correctly.
+ if results["artifacts_url"] is None:
+ return (results["status"], "")
- return (results['status'], results['artifacts_url'].rstrip('/'))
+ return (results["status"], results["artifacts_url"].rstrip("/"))
def ParseTryjobBuildbucketId(msg):
- """Find the buildbucket-id in the messages from `cros tryjob`.
-
- Args:
- msg: messages from `cros tryjob`
-
- Returns:
- buildbucket-id, which will be passed to `cros buildresult`
- """
- output_list = ast.literal_eval(msg)
- output_dict = output_list[0]
- if 'buildbucket_id' in output_dict:
- return output_dict['buildbucket_id']
- return None
-
-
-def SubmitTryjob(chromeos_root,
- buildbot_name,
- patch_list,
- tryjob_flags=None,
- build_toolchain=False):
- """Calls `cros tryjob ...`
-
- Args:
- chromeos_root: the path to the ChromeOS root, needed for finding chromite
- and launching the buildbot.
- buildbot_name: the name of the buildbot queue, such as lumpy-release or
- daisy-paladin.
- patch_list: a python list of the patches, if any, for the buildbot to use.
- tryjob_flags: See cros tryjob --help for available options.
- build_toolchain: builds and uses the latest toolchain, rather than the
- prebuilt one in SDK.
-
- Returns:
- buildbucket id
- """
- patch_arg = ''
- if patch_list:
- for p in patch_list:
- patch_arg = patch_arg + ' -g ' + repr(p)
- if not tryjob_flags:
- tryjob_flags = []
- if build_toolchain:
- tryjob_flags.append('--latest-toolchain')
- tryjob_flags = ' '.join(tryjob_flags)
-
- # Launch buildbot with appropriate flags.
- build = buildbot_name
- command = ('cros_sdk -- cros tryjob --yes --json --nochromesdk %s %s %s' %
- (tryjob_flags, patch_arg, build))
- print('CMD: %s' % command)
- _, out, _ = RunCommandInPath(chromeos_root, command)
- buildbucket_id = ParseTryjobBuildbucketId(out)
- print('buildbucket_id: %s' % repr(buildbucket_id))
- if not buildbucket_id:
- logger.GetLogger().LogFatal('Error occurred while launching trybot job: '
- '%s' % command)
- return buildbucket_id
-
-
-def GetTrybotImage(chromeos_root,
- buildbot_name,
- patch_list,
- tryjob_flags=None,
- build_toolchain=False,
- asynchronous=False):
- """Launch buildbot and get resulting trybot artifact name.
-
- This function launches a buildbot with the appropriate flags to
- build the test ChromeOS image, with the current ToT mobile compiler. It
- checks every 10 minutes to see if the trybot has finished. When the trybot
- has finished, it parses the resulting report logs to find the trybot
- artifact (if one was created), and returns that artifact name.
-
- Args:
- chromeos_root: the path to the ChromeOS root, needed for finding chromite
- and launching the buildbot.
- buildbot_name: the name of the buildbot queue, such as lumpy-release or
- daisy-paladin.
- patch_list: a python list of the patches, if any, for the buildbot to use.
- tryjob_flags: See cros tryjob --help for available options.
- build_toolchain: builds and uses the latest toolchain, rather than the
- prebuilt one in SDK.
- asynchronous: don't wait for artifacts; just return the buildbucket id
-
- Returns:
- (buildbucket id, partial image url) e.g.
- (8952271933586980528, trybot-elm-release-tryjob/R67-10480.0.0-b2373596)
- """
- buildbucket_id = SubmitTryjob(chromeos_root, buildbot_name, patch_list,
- tryjob_flags, build_toolchain)
- if asynchronous:
- return buildbucket_id, ' '
-
- # The trybot generally takes more than 2 hours to finish.
- # Wait two hours before polling the status.
- time.sleep(INITIAL_SLEEP_TIME)
- elapsed = INITIAL_SLEEP_TIME
- status = 'running'
- image = ''
- while True:
- status, image = PeekTrybotImage(chromeos_root, buildbucket_id)
- if status == 'running':
- if elapsed > TIME_OUT:
+ """Find the buildbucket-id in the messages from `cros tryjob`.
+
+ Args:
+ msg: messages from `cros tryjob`
+
+ Returns:
+ buildbucket-id, which will be passed to `cros buildresult`
+ """
+ output_list = ast.literal_eval(msg)
+ output_dict = output_list[0]
+ if "buildbucket_id" in output_dict:
+ return output_dict["buildbucket_id"]
+ return None
+
+
+def SubmitTryjob(
+ chromeos_root,
+ buildbot_name,
+ patch_list,
+ tryjob_flags=None,
+ build_toolchain=False,
+):
+ """Calls `cros tryjob ...`
+
+ Args:
+ chromeos_root: the path to the ChromeOS root, needed for finding chromite
+ and launching the buildbot.
+ buildbot_name: the name of the buildbot queue, such as lumpy-release or
+ daisy-paladin.
+ patch_list: a python list of the patches, if any, for the buildbot to use.
+ tryjob_flags: See cros tryjob --help for available options.
+ build_toolchain: builds and uses the latest toolchain, rather than the
+ prebuilt one in SDK.
+
+ Returns:
+ buildbucket id
+ """
+ patch_arg = ""
+ if patch_list:
+ for p in patch_list:
+ patch_arg = patch_arg + " -g " + repr(p)
+ if not tryjob_flags:
+ tryjob_flags = []
+ if build_toolchain:
+ tryjob_flags.append("--latest-toolchain")
+ tryjob_flags = " ".join(tryjob_flags)
+
+ # Launch buildbot with appropriate flags.
+ build = buildbot_name
+ command = "cros_sdk -- cros tryjob --yes --json --nochromesdk %s %s %s" % (
+ tryjob_flags,
+ patch_arg,
+ build,
+ )
+ print("CMD: %s" % command)
+ _, out, _ = RunCommandInPath(chromeos_root, command)
+ buildbucket_id = ParseTryjobBuildbucketId(out)
+ print("buildbucket_id: %s" % repr(buildbucket_id))
+ if not buildbucket_id:
logger.GetLogger().LogFatal(
- 'Unable to get build result for target %s.' % buildbot_name)
- else:
- wait_msg = 'Unable to find build result; job may be running.'
- logger.GetLogger().LogOutput(wait_msg)
- logger.GetLogger().LogOutput('{0} minutes elapsed.'.format(elapsed / 60))
- logger.GetLogger().LogOutput('Sleeping {0} seconds.'.format(SLEEP_TIME))
- time.sleep(SLEEP_TIME)
- elapsed += SLEEP_TIME
+ "Error occurred while launching trybot job: " "%s" % command
+ )
+ return buildbucket_id
+
+
+def GetTrybotImage(
+ chromeos_root,
+ buildbot_name,
+ patch_list,
+ tryjob_flags=None,
+ build_toolchain=False,
+ asynchronous=False,
+):
+ """Launch buildbot and get resulting trybot artifact name.
+
+ This function launches a buildbot with the appropriate flags to
+ build the test ChromeOS image, with the current ToT mobile compiler. It
+ checks every 10 minutes to see if the trybot has finished. When the trybot
+ has finished, it parses the resulting report logs to find the trybot
+ artifact (if one was created), and returns that artifact name.
+
+ Args:
+ chromeos_root: the path to the ChromeOS root, needed for finding chromite
+ and launching the buildbot.
+ buildbot_name: the name of the buildbot queue, such as lumpy-release or
+ daisy-paladin.
+ patch_list: a python list of the patches, if any, for the buildbot to use.
+ tryjob_flags: See cros tryjob --help for available options.
+ build_toolchain: builds and uses the latest toolchain, rather than the
+ prebuilt one in SDK.
+ asynchronous: don't wait for artifacts; just return the buildbucket id
+
+ Returns:
+ (buildbucket id, partial image url) e.g.
+ (8952271933586980528, trybot-elm-release-tryjob/R67-10480.0.0-b2373596)
+ """
+ buildbucket_id = SubmitTryjob(
+ chromeos_root, buildbot_name, patch_list, tryjob_flags, build_toolchain
+ )
+ if asynchronous:
+ return buildbucket_id, " "
+
+ # The trybot generally takes more than 2 hours to finish.
+ # Wait two hours before polling the status.
+ time.sleep(INITIAL_SLEEP_TIME)
+ elapsed = INITIAL_SLEEP_TIME
+ status = "running"
+ image = ""
+ while True:
+ status, image = PeekTrybotImage(chromeos_root, buildbucket_id)
+ if status == "running":
+ if elapsed > TIME_OUT:
+ logger.GetLogger().LogFatal(
+ "Unable to get build result for target %s." % buildbot_name
+ )
+ else:
+ wait_msg = "Unable to find build result; job may be running."
+ logger.GetLogger().LogOutput(wait_msg)
+ logger.GetLogger().LogOutput(f"{elapsed / 60} minutes elapsed.")
+ logger.GetLogger().LogOutput(f"Sleeping {SLEEP_TIME} seconds.")
+ time.sleep(SLEEP_TIME)
+ elapsed += SLEEP_TIME
+ else:
+ break
+
+ if not buildbot_name.endswith("-toolchain") and status == "fail":
+ # For rotating testers, we don't care about their status
+ # result, because if any HWTest failed it will be non-zero.
+ #
+ # The nightly performance tests do not run HWTests, so if
+ # their status is non-zero, we do care. In this case
+ # non-zero means the image itself probably did not build.
+ image = ""
+
+ if not image:
+ logger.GetLogger().LogError(
+ "Trybot job (buildbucket id: %s) failed with"
+ "status %s; no trybot image generated. " % (buildbucket_id, status)
+ )
else:
- break
-
- if not buildbot_name.endswith('-toolchain') and status == 'fail':
- # For rotating testers, we don't care about their status
- # result, because if any HWTest failed it will be non-zero.
- #
- # The nightly performance tests do not run HWTests, so if
- # their status is non-zero, we do care. In this case
- # non-zero means the image itself probably did not build.
- image = ''
-
- if not image:
- logger.GetLogger().LogError('Trybot job (buildbucket id: %s) failed with'
- 'status %s; no trybot image generated. ' %
- (buildbucket_id, status))
- else:
- # Convert full gs path to what crosperf expects. For example, convert
- # gs://chromeos-image-archive/trybot-elm-release-tryjob/R67-10468.0.0-b20789
- # to
- # trybot-elm-release-tryjob/R67-10468.0.0-b20789
- image = '/'.join(image.split('/')[-2:])
-
- logger.GetLogger().LogOutput("image is '%s'" % image)
- logger.GetLogger().LogOutput('status is %s' % status)
- return buildbucket_id, image
+ # Convert full gs path to what crosperf expects. For example, convert
+ # gs://chromeos-image-archive/trybot-elm-release-tryjob/R67-10468.0.0-b20789
+ # to
+ # trybot-elm-release-tryjob/R67-10468.0.0-b20789
+ image = "/".join(image.split("/")[-2:])
+
+ logger.GetLogger().LogOutput("image is '%s'" % image)
+ logger.GetLogger().LogOutput("status is %s" % status)
+ return buildbucket_id, image
def DoesImageExist(chromeos_root, build):
- """Check if the image for the given build exists."""
+ """Check if the image for the given build exists."""
- ce = command_executer.GetCommandExecuter()
- command = ('gsutil ls gs://chromeos-image-archive/%s'
- '/chromiumos_test_image.tar.xz' % (build))
- ret = ce.ChrootRunCommand(chromeos_root, command, print_to_console=False)
- return not ret
+ ce = command_executer.GetCommandExecuter()
+ command = (
+ "gsutil ls gs://chromeos-image-archive/%s"
+ "/chromiumos_test_image.tar.xz" % (build)
+ )
+ ret = ce.ChrootRunCommand(chromeos_root, command, print_to_console=False)
+ return not ret
def WaitForImage(chromeos_root, build):
- """Wait for an image to be ready."""
+ """Wait for an image to be ready."""
- elapsed_time = 0
- while elapsed_time < TIME_OUT:
- if DoesImageExist(chromeos_root, build):
- return
- logger.GetLogger().LogOutput('Image %s not ready, waiting for 10 minutes' %
- build)
- time.sleep(SLEEP_TIME)
- elapsed_time += SLEEP_TIME
+ elapsed_time = 0
+ while elapsed_time < TIME_OUT:
+ if DoesImageExist(chromeos_root, build):
+ return
+ logger.GetLogger().LogOutput(
+ "Image %s not ready, waiting for 10 minutes" % build
+ )
+ time.sleep(SLEEP_TIME)
+ elapsed_time += SLEEP_TIME
- logger.GetLogger().LogOutput('Image %s not found, waited for %d hours' %
- (build, (TIME_OUT / 3600)))
- raise BuildbotTimeout('Timeout while waiting for image %s' % build)
+ logger.GetLogger().LogOutput(
+ "Image %s not found, waited for %d hours" % (build, (TIME_OUT / 3600))
+ )
+ raise BuildbotTimeout("Timeout while waiting for image %s" % build)
def GetLatestImage(chromeos_root, path):
- """Get latest image"""
-
- fmt = re.compile(r'R([0-9]+)-([0-9]+).([0-9]+).([0-9]+)')
-
- ce = command_executer.GetCommandExecuter()
- command = ('gsutil ls gs://chromeos-image-archive/%s' % path)
- ret, out, _ = ce.ChrootRunCommandWOutput(
- chromeos_root, command, print_to_console=False)
- if ret != 0:
- raise RuntimeError('Failed to list buckets with command: %s.' % command)
- candidates = [l.split('/')[-2] for l in out.split()]
- candidates = [fmt.match(c) for c in candidates]
- candidates = [[int(r) for r in m.group(1, 2, 3, 4)] for m in candidates if m]
- candidates.sort(reverse=True)
- for c in candidates:
- build = '%s/R%d-%d.%d.%d' % (path, c[0], c[1], c[2], c[3])
- # Denylist "R79-12384.0.0" image released by mistake.
- # TODO(crbug.com/992242): Remove the filter by 2019-09-05.
- if c == [79, 12384, 0, 0]:
- continue
-
- if DoesImageExist(chromeos_root, build):
- return build
+ """Get latest image"""
+
+ fmt = re.compile(r"R([0-9]+)-([0-9]+).([0-9]+).([0-9]+)")
+
+ ce = command_executer.GetCommandExecuter()
+ command = "gsutil ls gs://chromeos-image-archive/%s" % path
+ ret, out, _ = ce.ChrootRunCommandWOutput(
+ chromeos_root, command, print_to_console=False
+ )
+ if ret != 0:
+ raise RuntimeError("Failed to list buckets with command: %s." % command)
+ candidates = [l.split("/")[-2] for l in out.split()]
+ candidates = [fmt.match(c) for c in candidates]
+ candidates = [
+ [int(r) for r in m.group(1, 2, 3, 4)] for m in candidates if m
+ ]
+ candidates.sort(reverse=True)
+ for c in candidates:
+ build = "%s/R%d-%d.%d.%d" % (path, c[0], c[1], c[2], c[3])
+ if DoesImageExist(chromeos_root, build):
+ return build
def GetLatestRecipeImage(chromeos_root, path):
- """Get latest nightly test image from recipe bucket.
-
- Image location example:
- $ARCHIVE/lulu-llvm-next-nightly/R84-13037.0.0-31011-8883172717979984032
- """
-
- fmt = re.compile(r'R([0-9]+)-([0-9]+).([0-9]+).([0-9]+)-([0-9]+)')
-
- ce = command_executer.GetCommandExecuter()
- command = ('gsutil ls gs://chromeos-image-archive/%s' % path)
- ret, out, _ = ce.ChrootRunCommandWOutput(
- chromeos_root, command, print_to_console=False)
- if ret != 0:
- raise RuntimeError('Failed to list buckets with command: %s.' % command)
- candidates = [l.split('/')[-2] for l in out.split()]
- candidates = [(fmt.match(c), c) for c in candidates]
- candidates = [([int(r)
- for r in m[0].group(1, 2, 3, 4, 5)], m[1])
- for m in candidates
- if m]
- candidates.sort(key=lambda x: x[0], reverse=True)
- # Try to get ony last two days of images since nightly tests are run once
- # another day.
- for c in candidates[:2]:
- build = '%s/%s' % (path, c[1])
- if DoesImageExist(chromeos_root, build):
- return build
+ """Get latest nightly test image from recipe bucket.
+
+ Image location example:
+ $ARCHIVE/lulu-llvm-next-nightly/R84-13037.0.0-31011-8883172717979984032
+ """
+
+ fmt = re.compile(r"R([0-9]+)-([0-9]+).([0-9]+).([0-9]+)-([0-9]+)")
+
+ ce = command_executer.GetCommandExecuter()
+ command = "gsutil ls gs://chromeos-image-archive/%s" % path
+ ret, out, _ = ce.ChrootRunCommandWOutput(
+ chromeos_root, command, print_to_console=False
+ )
+ if ret != 0:
+ raise RuntimeError("Failed to list buckets with command: %s." % command)
+ candidates = [l.split("/")[-2] for l in out.split()]
+ candidates = [(fmt.match(c), c) for c in candidates]
+ candidates = [
+ ([int(r) for r in m[0].group(1, 2, 3, 4, 5)], m[1])
+ for m in candidates
+ if m
+ ]
+ candidates.sort(key=lambda x: x[0], reverse=True)
+ # Try to get ony last two days of images since nightly tests are run once
+ # another day.
+ for c in candidates[:2]:
+ build = "%s/%s" % (path, c[1])
+ if DoesImageExist(chromeos_root, build):
+ return build
diff --git a/cros_utils/buildbot_utils_unittest.py b/cros_utils/buildbot_utils_unittest.py
index c615c95f..2c9585b5 100755
--- a/cros_utils/buildbot_utils_unittest.py
+++ b/cros_utils/buildbot_utils_unittest.py
@@ -1,16 +1,14 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
-# Copyright 2018 The Chromium OS Authors. All rights reserved.
+# Copyright 2018 The ChromiumOS Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Unittest for buildbot_utils.py."""
-from __future__ import print_function
import time
-
import unittest
from unittest.mock import patch
@@ -19,160 +17,226 @@ from cros_utils import command_executer
class TrybotTest(unittest.TestCase):
- """Test for CommandExecuter class."""
-
- tryjob_out = (
- '[{"buildbucket_id": "8952721143823688176", "build_config": '
- '"cave-llvm-toolchain-tryjob", "url": '
- # pylint: disable=line-too-long
- '"http://cros-goldeneye/chromeos/healthmonitoring/buildDetails?buildbucketId=8952721143823688176"}]'
- )
-
- GSUTILS_LS = '\n'.join([
- 'gs://chromeos-image-archive/{0}/R78-12421.0.0/',
- 'gs://chromeos-image-archive/{0}/R78-12422.0.0/',
- 'gs://chromeos-image-archive/{0}/R78-12423.0.0/',
- ])
-
- GSUTILS_LS_RECIPE = '\n'.join([
- 'gs://chromeos-image-archive/{0}/R83-12995.0.0-30031-8885075268947031/',
- 'gs://chromeos-image-archive/{0}/R83-13003.0.0-30196-8884755532184725/',
- 'gs://chromeos-image-archive/{0}/R83-13003.0.0-30218-8884712858556419/',
- ])
-
- buildresult_out = (
- '{"8952721143823688176": {"status": "pass", "artifacts_url":'
- '"gs://chromeos-image-archive/trybot-elm-release-tryjob/R67-10468.0.0-'
- 'b20789"}}')
-
- buildbucket_id = '8952721143823688176'
- counter_1 = 10
-
- def testGetTrybotImage(self):
- with patch.object(buildbot_utils, 'SubmitTryjob') as mock_submit:
- with patch.object(buildbot_utils, 'PeekTrybotImage') as mock_peek:
- with patch.object(time, 'sleep', return_value=None):
-
- def peek(_chromeos_root, _buildbucket_id):
- self.counter_1 -= 1
- if self.counter_1 >= 0:
- return ('running', '')
- return ('pass',
- 'gs://chromeos-image-archive/trybot-elm-release-tryjob/'
- 'R67-10468.0.0-b20789')
-
- mock_peek.side_effect = peek
- mock_submit.return_value = self.buildbucket_id
-
- # sync
- buildbucket_id, image = buildbot_utils.GetTrybotImage(
- '/tmp', 'falco-release-tryjob', [])
- self.assertEqual(buildbucket_id, self.buildbucket_id)
- self.assertEqual('trybot-elm-release-tryjob/'
- 'R67-10468.0.0-b20789', image)
-
- # async
- buildbucket_id, image = buildbot_utils.GetTrybotImage(
- '/tmp', 'falco-release-tryjob', [], asynchronous=True)
- self.assertEqual(buildbucket_id, self.buildbucket_id)
- self.assertEqual(' ', image)
-
- def testSubmitTryjob(self):
- with patch.object(command_executer.CommandExecuter,
- 'RunCommandWOutput') as mocked_run:
- mocked_run.return_value = (0, self.tryjob_out, '')
- buildbucket_id = buildbot_utils.SubmitTryjob('/', 'falco-release-tryjob',
- [], [])
- self.assertEqual(buildbucket_id, self.buildbucket_id)
-
- def testPeekTrybotImage(self):
- with patch.object(command_executer.CommandExecuter,
- 'RunCommandWOutput') as mocked_run:
- # pass
- mocked_run.return_value = (0, self.buildresult_out, '')
- status, image = buildbot_utils.PeekTrybotImage('/', self.buildbucket_id)
- self.assertEqual('pass', status)
- self.assertEqual(
- 'gs://chromeos-image-archive/trybot-elm-release-tryjob/'
- 'R67-10468.0.0-b20789', image)
-
- # running
- mocked_run.return_value = (1, '', '')
- status, image = buildbot_utils.PeekTrybotImage('/', self.buildbucket_id)
- self.assertEqual('running', status)
- self.assertEqual(None, image)
-
- # fail
- buildresult_fail = self.buildresult_out.replace('\"pass\"', '\"fail\"')
- mocked_run.return_value = (0, buildresult_fail, '')
- status, image = buildbot_utils.PeekTrybotImage('/', self.buildbucket_id)
- self.assertEqual('fail', status)
- self.assertEqual(
- 'gs://chromeos-image-archive/trybot-elm-release-tryjob/'
- 'R67-10468.0.0-b20789', image)
-
- def testParseTryjobBuildbucketId(self):
- buildbucket_id = buildbot_utils.ParseTryjobBuildbucketId(self.tryjob_out)
- self.assertEqual(buildbucket_id, self.buildbucket_id)
-
- def testGetLatestImageValid(self):
- with patch.object(command_executer.CommandExecuter,
- 'ChrootRunCommandWOutput') as mocked_run:
- with patch.object(buildbot_utils, 'DoesImageExist') as mocked_imageexist:
- IMAGE_DIR = 'lulu-release'
- mocked_run.return_value = (0, self.GSUTILS_LS.format(IMAGE_DIR), '')
- mocked_imageexist.return_value = True
- image = buildbot_utils.GetLatestImage('', IMAGE_DIR)
- self.assertEqual(image, '{0}/R78-12423.0.0'.format(IMAGE_DIR))
-
- def testGetLatestImageInvalid(self):
- with patch.object(command_executer.CommandExecuter,
- 'ChrootRunCommandWOutput') as mocked_run:
- with patch.object(buildbot_utils, 'DoesImageExist') as mocked_imageexist:
- IMAGE_DIR = 'kefka-release'
- mocked_run.return_value = (0, self.GSUTILS_LS.format(IMAGE_DIR), '')
- mocked_imageexist.return_value = False
- image = buildbot_utils.GetLatestImage('', IMAGE_DIR)
- self.assertIsNone(image)
-
- def testGetLatestRecipeImageValid(self):
- with patch.object(command_executer.CommandExecuter,
- 'ChrootRunCommandWOutput') as mocked_run:
- with patch.object(buildbot_utils, 'DoesImageExist') as mocked_imageexist:
- IMAGE_DIR = 'lulu-llvm-next-nightly'
- mocked_run.return_value = (0, self.GSUTILS_LS_RECIPE.format(IMAGE_DIR),
- '')
- mocked_imageexist.return_value = True
- image = buildbot_utils.GetLatestRecipeImage('', IMAGE_DIR)
- self.assertEqual(
- image, '{0}/R83-13003.0.0-30218-8884712858556419'.format(IMAGE_DIR))
-
- def testGetLatestRecipeImageInvalid(self):
- with patch.object(command_executer.CommandExecuter,
- 'ChrootRunCommandWOutput') as mocked_run:
- with patch.object(buildbot_utils, 'DoesImageExist') as mocked_imageexist:
- IMAGE_DIR = 'kefka-llvm-next-nightly'
- mocked_run.return_value = (0, self.GSUTILS_LS_RECIPE.format(IMAGE_DIR),
- '')
- mocked_imageexist.return_value = False
- image = buildbot_utils.GetLatestRecipeImage('', IMAGE_DIR)
- self.assertIsNone(image)
-
- def testGetLatestRecipeImageTwodays(self):
- with patch.object(command_executer.CommandExecuter,
- 'ChrootRunCommandWOutput') as mocked_run:
- with patch.object(buildbot_utils, 'DoesImageExist') as mocked_imageexist:
- IMAGE_DIR = 'lulu-llvm-next-nightly'
- mocked_run.return_value = (0, self.GSUTILS_LS_RECIPE.format(IMAGE_DIR),
- '')
- mocked_imageexist.side_effect = [False, False, True]
- image = buildbot_utils.GetLatestRecipeImage('', IMAGE_DIR)
- self.assertIsNone(image)
- mocked_imageexist.side_effect = [False, True, True]
- image = buildbot_utils.GetLatestRecipeImage('', IMAGE_DIR)
- self.assertEqual(
- image, '{0}/R83-13003.0.0-30196-8884755532184725'.format(IMAGE_DIR))
-
-
-if __name__ == '__main__':
- unittest.main()
+ """Test for CommandExecuter class."""
+
+ tryjob_out = (
+ '[{"buildbucket_id": "8952721143823688176", "build_config": '
+ '"cave-llvm-toolchain-tryjob", "url": '
+ # pylint: disable=line-too-long
+ '"http://cros-goldeneye/chromeos/healthmonitoring/buildDetails?buildbucketId=8952721143823688176"}]'
+ )
+
+ GSUTILS_LS = "\n".join(
+ [
+ "gs://chromeos-image-archive/{0}/R78-12421.0.0/",
+ "gs://chromeos-image-archive/{0}/R78-12422.0.0/",
+ "gs://chromeos-image-archive/{0}/R78-12423.0.0/",
+ ]
+ )
+
+ GSUTILS_LS_RECIPE = "\n".join(
+ [
+ "gs://chromeos-image-archive/{0}/R83-12995.0.0-30031-8885075268947031/",
+ "gs://chromeos-image-archive/{0}/R83-13003.0.0-30196-8884755532184725/",
+ "gs://chromeos-image-archive/{0}/R83-13003.0.0-30218-8884712858556419/",
+ ]
+ )
+
+ buildresult_out = (
+ '{"8952721143823688176": {"status": "pass", "artifacts_url":'
+ '"gs://chromeos-image-archive/trybot-elm-release-tryjob/R67-10468.0.0-'
+ 'b20789"}}'
+ )
+
+ buildbucket_id = "8952721143823688176"
+ counter_1 = 10
+
+ def testGetTrybotImage(self):
+ with patch.object(buildbot_utils, "SubmitTryjob") as mock_submit:
+ with patch.object(buildbot_utils, "PeekTrybotImage") as mock_peek:
+ with patch.object(time, "sleep", return_value=None):
+
+ def peek(_chromeos_root, _buildbucket_id):
+ self.counter_1 -= 1
+ if self.counter_1 >= 0:
+ return ("running", "")
+ return (
+ "pass",
+ "gs://chromeos-image-archive/trybot-elm-release-tryjob/"
+ "R67-10468.0.0-b20789",
+ )
+
+ mock_peek.side_effect = peek
+ mock_submit.return_value = self.buildbucket_id
+
+ # sync
+ buildbucket_id, image = buildbot_utils.GetTrybotImage(
+ "/tmp", "falco-release-tryjob", []
+ )
+ self.assertEqual(buildbucket_id, self.buildbucket_id)
+ self.assertEqual(
+ "trybot-elm-release-tryjob/" "R67-10468.0.0-b20789",
+ image,
+ )
+
+ # async
+ buildbucket_id, image = buildbot_utils.GetTrybotImage(
+ "/tmp", "falco-release-tryjob", [], asynchronous=True
+ )
+ self.assertEqual(buildbucket_id, self.buildbucket_id)
+ self.assertEqual(" ", image)
+
+ def testSubmitTryjob(self):
+ with patch.object(
+ command_executer.CommandExecuter, "RunCommandWOutput"
+ ) as mocked_run:
+ mocked_run.return_value = (0, self.tryjob_out, "")
+ buildbucket_id = buildbot_utils.SubmitTryjob(
+ "/", "falco-release-tryjob", [], []
+ )
+ self.assertEqual(buildbucket_id, self.buildbucket_id)
+
+ def testPeekTrybotImage(self):
+ with patch.object(
+ command_executer.CommandExecuter, "RunCommandWOutput"
+ ) as mocked_run:
+ # pass
+ mocked_run.return_value = (0, self.buildresult_out, "")
+ status, image = buildbot_utils.PeekTrybotImage(
+ "/", self.buildbucket_id
+ )
+ self.assertEqual("pass", status)
+ self.assertEqual(
+ "gs://chromeos-image-archive/trybot-elm-release-tryjob/"
+ "R67-10468.0.0-b20789",
+ image,
+ )
+
+ # running
+ mocked_run.return_value = (1, "", "")
+ status, image = buildbot_utils.PeekTrybotImage(
+ "/", self.buildbucket_id
+ )
+ self.assertEqual("running", status)
+ self.assertEqual(None, image)
+
+ # fail
+ buildresult_fail = self.buildresult_out.replace('"pass"', '"fail"')
+ mocked_run.return_value = (0, buildresult_fail, "")
+ status, image = buildbot_utils.PeekTrybotImage(
+ "/", self.buildbucket_id
+ )
+ self.assertEqual("fail", status)
+ self.assertEqual(
+ "gs://chromeos-image-archive/trybot-elm-release-tryjob/"
+ "R67-10468.0.0-b20789",
+ image,
+ )
+
+ def testParseTryjobBuildbucketId(self):
+ buildbucket_id = buildbot_utils.ParseTryjobBuildbucketId(
+ self.tryjob_out
+ )
+ self.assertEqual(buildbucket_id, self.buildbucket_id)
+
+ def testGetLatestImageValid(self):
+ with patch.object(
+ command_executer.CommandExecuter, "ChrootRunCommandWOutput"
+ ) as mocked_run:
+ with patch.object(
+ buildbot_utils, "DoesImageExist"
+ ) as mocked_imageexist:
+ IMAGE_DIR = "lulu-release"
+ mocked_run.return_value = (
+ 0,
+ self.GSUTILS_LS.format(IMAGE_DIR),
+ "",
+ )
+ mocked_imageexist.return_value = True
+ image = buildbot_utils.GetLatestImage("", IMAGE_DIR)
+ self.assertEqual(image, "{0}/R78-12423.0.0".format(IMAGE_DIR))
+
+ def testGetLatestImageInvalid(self):
+ with patch.object(
+ command_executer.CommandExecuter, "ChrootRunCommandWOutput"
+ ) as mocked_run:
+ with patch.object(
+ buildbot_utils, "DoesImageExist"
+ ) as mocked_imageexist:
+ IMAGE_DIR = "kefka-release"
+ mocked_run.return_value = (
+ 0,
+ self.GSUTILS_LS.format(IMAGE_DIR),
+ "",
+ )
+ mocked_imageexist.return_value = False
+ image = buildbot_utils.GetLatestImage("", IMAGE_DIR)
+ self.assertIsNone(image)
+
+ def testGetLatestRecipeImageValid(self):
+ with patch.object(
+ command_executer.CommandExecuter, "ChrootRunCommandWOutput"
+ ) as mocked_run:
+ with patch.object(
+ buildbot_utils, "DoesImageExist"
+ ) as mocked_imageexist:
+ IMAGE_DIR = "lulu-llvm-next-nightly"
+ mocked_run.return_value = (
+ 0,
+ self.GSUTILS_LS_RECIPE.format(IMAGE_DIR),
+ "",
+ )
+ mocked_imageexist.return_value = True
+ image = buildbot_utils.GetLatestRecipeImage("", IMAGE_DIR)
+ self.assertEqual(
+ image,
+ "{0}/R83-13003.0.0-30218-8884712858556419".format(
+ IMAGE_DIR
+ ),
+ )
+
+ def testGetLatestRecipeImageInvalid(self):
+ with patch.object(
+ command_executer.CommandExecuter, "ChrootRunCommandWOutput"
+ ) as mocked_run:
+ with patch.object(
+ buildbot_utils, "DoesImageExist"
+ ) as mocked_imageexist:
+ IMAGE_DIR = "kefka-llvm-next-nightly"
+ mocked_run.return_value = (
+ 0,
+ self.GSUTILS_LS_RECIPE.format(IMAGE_DIR),
+ "",
+ )
+ mocked_imageexist.return_value = False
+ image = buildbot_utils.GetLatestRecipeImage("", IMAGE_DIR)
+ self.assertIsNone(image)
+
+ def testGetLatestRecipeImageTwodays(self):
+ with patch.object(
+ command_executer.CommandExecuter, "ChrootRunCommandWOutput"
+ ) as mocked_run:
+ with patch.object(
+ buildbot_utils, "DoesImageExist"
+ ) as mocked_imageexist:
+ IMAGE_DIR = "lulu-llvm-next-nightly"
+ mocked_run.return_value = (
+ 0,
+ self.GSUTILS_LS_RECIPE.format(IMAGE_DIR),
+ "",
+ )
+ mocked_imageexist.side_effect = [False, False, True]
+ image = buildbot_utils.GetLatestRecipeImage("", IMAGE_DIR)
+ self.assertIsNone(image)
+ mocked_imageexist.side_effect = [False, True, True]
+ image = buildbot_utils.GetLatestRecipeImage("", IMAGE_DIR)
+ self.assertEqual(
+ image,
+ "{0}/R83-13003.0.0-30196-8884755532184725".format(
+ IMAGE_DIR
+ ),
+ )
+
+
+if __name__ == "__main__":
+ unittest.main()
diff --git a/cros_utils/command_executer.py b/cros_utils/command_executer.py
index cc0f3372..573bb2d6 100755
--- a/cros_utils/command_executer.py
+++ b/cros_utils/command_executer.py
@@ -1,12 +1,11 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
-# Copyright 2011 The Chromium OS Authors. All rights reserved.
+# Copyright 2011 The ChromiumOS Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Utilities to run commands in outside/inside chroot and on the board."""
-from __future__ import print_function
import getpass
import os
@@ -20,699 +19,775 @@ import time
from cros_utils import logger
+
mock_default = False
-CHROMEOS_SCRIPTS_DIR = '/mnt/host/source/src/scripts'
-LOG_LEVEL = ('none', 'quiet', 'average', 'verbose')
+CHROMEOS_SCRIPTS_DIR = "/mnt/host/source/src/scripts"
+LOG_LEVEL = ("none", "quiet", "average", "verbose")
def InitCommandExecuter(mock=False):
- # pylint: disable=global-statement
- global mock_default
- # Whether to default to a mock command executer or not
- mock_default = mock
+ # pylint: disable=global-statement
+ global mock_default
+ # Whether to default to a mock command executer or not
+ mock_default = mock
-def GetCommandExecuter(logger_to_set=None, mock=False, log_level='verbose'):
- # If the default is a mock executer, always return one.
- if mock_default or mock:
- return MockCommandExecuter(log_level, logger_to_set)
- else:
- return CommandExecuter(log_level, logger_to_set)
+def GetCommandExecuter(logger_to_set=None, mock=False, log_level="verbose"):
+ # If the default is a mock executer, always return one.
+ if mock_default or mock:
+ return MockCommandExecuter(log_level, logger_to_set)
+ else:
+ return CommandExecuter(log_level, logger_to_set)
class CommandExecuter(object):
- """Provides several methods to execute commands on several environments."""
-
- def __init__(self, log_level, logger_to_set=None):
- self.log_level = log_level
- if log_level == 'none':
- self.logger = None
- else:
- if logger_to_set is not None:
- self.logger = logger_to_set
- else:
- self.logger = logger.GetLogger()
-
- def GetLogLevel(self):
- return self.log_level
-
- def SetLogLevel(self, log_level):
- self.log_level = log_level
-
- def RunCommandGeneric(self,
- cmd,
- return_output=False,
- machine=None,
- username=None,
- command_terminator=None,
- command_timeout=None,
- terminated_timeout=10,
- print_to_console=True,
- env=None,
- except_handler=lambda p, e: None):
- """Run a command.
-
- Returns triplet (returncode, stdout, stderr).
- """
-
- cmd = str(cmd)
-
- if self.log_level == 'quiet':
- print_to_console = False
-
- if self.log_level == 'verbose':
- self.logger.LogCmd(cmd, machine, username, print_to_console)
- elif self.logger:
- self.logger.LogCmdToFileOnly(cmd, machine, username)
- if command_terminator and command_terminator.IsTerminated():
- if self.logger:
- self.logger.LogError('Command was terminated!', print_to_console)
- return (1, '', '')
-
- if machine is not None:
- user = ''
- if username is not None:
- user = username + '@'
- cmd = "ssh -t -t %s%s -- '%s'" % (user, machine, cmd)
-
- # We use setsid so that the child will have a different session id
- # and we can easily kill the process group. This is also important
- # because the child will be disassociated from the parent terminal.
- # In this way the child cannot mess the parent's terminal.
- p = None
- try:
- # pylint: disable=bad-option-value, subprocess-popen-preexec-fn
- p = subprocess.Popen(cmd,
- stdout=subprocess.PIPE,
- stderr=subprocess.PIPE,
- shell=True,
- preexec_fn=os.setsid,
- executable='/bin/bash',
- env=env)
-
- full_stdout = ''
- full_stderr = ''
-
- # Pull output from pipes, send it to file/stdout/string
- out = err = None
- pipes = [p.stdout, p.stderr]
-
- my_poll = select.poll()
- my_poll.register(p.stdout, select.POLLIN)
- my_poll.register(p.stderr, select.POLLIN)
-
- terminated_time = None
- started_time = time.time()
-
- while pipes:
+ """Provides several methods to execute commands on several environments."""
+
+ def __init__(self, log_level, logger_to_set=None):
+ self.log_level = log_level
+ if log_level == "none":
+ self.logger = None
+ else:
+ if logger_to_set is not None:
+ self.logger = logger_to_set
+ else:
+ self.logger = logger.GetLogger()
+
+ def GetLogLevel(self):
+ return self.log_level
+
+ def SetLogLevel(self, log_level):
+ self.log_level = log_level
+
+ def RunCommandGeneric(
+ self,
+ cmd,
+ return_output=False,
+ machine=None,
+ username=None,
+ command_terminator=None,
+ command_timeout=None,
+ terminated_timeout=10,
+ print_to_console=True,
+ env=None,
+ except_handler=lambda p, e: None,
+ ):
+ """Run a command.
+
+ Returns triplet (returncode, stdout, stderr).
+ """
+
+ cmd = str(cmd)
+
+ if self.log_level == "quiet":
+ print_to_console = False
+
+ if self.log_level == "verbose":
+ self.logger.LogCmd(cmd, machine, username, print_to_console)
+ elif self.logger:
+ self.logger.LogCmdToFileOnly(cmd, machine, username)
if command_terminator and command_terminator.IsTerminated():
- os.killpg(os.getpgid(p.pid), signal.SIGTERM)
- if self.logger:
- self.logger.LogError(
- 'Command received termination request. '
- 'Killed child process group.', print_to_console)
- break
-
- l = my_poll.poll(100)
- for (fd, _) in l:
- if fd == p.stdout.fileno():
- out = os.read(p.stdout.fileno(), 16384).decode('utf8')
- if return_output:
- full_stdout += out
if self.logger:
- self.logger.LogCommandOutput(out, print_to_console)
- if out == '':
- pipes.remove(p.stdout)
- my_poll.unregister(p.stdout)
- if fd == p.stderr.fileno():
- err = os.read(p.stderr.fileno(), 16384).decode('utf8')
+ self.logger.LogError(
+ "Command was terminated!", print_to_console
+ )
+ return (1, "", "")
+
+ if machine is not None:
+ user = ""
+ if username is not None:
+ user = username + "@"
+ cmd = "ssh -t -t %s%s -- '%s'" % (user, machine, cmd)
+
+ # We use setsid so that the child will have a different session id
+ # and we can easily kill the process group. This is also important
+ # because the child will be disassociated from the parent terminal.
+ # In this way the child cannot mess the parent's terminal.
+ p = None
+ try:
+ # pylint: disable=bad-option-value, subprocess-popen-preexec-fn
+ p = subprocess.Popen(
+ cmd,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ shell=True,
+ preexec_fn=os.setsid,
+ executable="/bin/bash",
+ env=env,
+ )
+
+ full_stdout = ""
+ full_stderr = ""
+
+ # Pull output from pipes, send it to file/stdout/string
+ out = err = None
+ pipes = [p.stdout, p.stderr]
+
+ my_poll = select.poll()
+ my_poll.register(p.stdout, select.POLLIN)
+ my_poll.register(p.stderr, select.POLLIN)
+
+ terminated_time = None
+ started_time = time.time()
+
+ while pipes:
+ if command_terminator and command_terminator.IsTerminated():
+ os.killpg(os.getpgid(p.pid), signal.SIGTERM)
+ if self.logger:
+ self.logger.LogError(
+ "Command received termination request. "
+ "Killed child process group.",
+ print_to_console,
+ )
+ break
+
+ l = my_poll.poll(100)
+ for (fd, _) in l:
+ if fd == p.stdout.fileno():
+ out = os.read(p.stdout.fileno(), 16384).decode("utf8")
+ if return_output:
+ full_stdout += out
+ if self.logger:
+ self.logger.LogCommandOutput(out, print_to_console)
+ if out == "":
+ pipes.remove(p.stdout)
+ my_poll.unregister(p.stdout)
+ if fd == p.stderr.fileno():
+ err = os.read(p.stderr.fileno(), 16384).decode("utf8")
+ if return_output:
+ full_stderr += err
+ if self.logger:
+ self.logger.LogCommandError(err, print_to_console)
+ if err == "":
+ pipes.remove(p.stderr)
+ my_poll.unregister(p.stderr)
+
+ if p.poll() is not None:
+ if terminated_time is None:
+ terminated_time = time.time()
+ elif (
+ terminated_timeout is not None
+ and time.time() - terminated_time > terminated_timeout
+ ):
+ if self.logger:
+ self.logger.LogWarning(
+ "Timeout of %s seconds reached since "
+ "process termination." % terminated_timeout,
+ print_to_console,
+ )
+ break
+
+ if (
+ command_timeout is not None
+ and time.time() - started_time > command_timeout
+ ):
+ os.killpg(os.getpgid(p.pid), signal.SIGTERM)
+ if self.logger:
+ self.logger.LogWarning(
+ "Timeout of %s seconds reached since process"
+ "started. Killed child process group."
+ % command_timeout,
+ print_to_console,
+ )
+ break
+
+ if out == err == "":
+ break
+
+ p.wait()
if return_output:
- full_stderr += err
+ return (p.returncode, full_stdout, full_stderr)
+ return (p.returncode, "", "")
+ except BaseException as err:
+ except_handler(p, err)
+ raise
+
+ def RunCommand(self, *args, **kwargs):
+ """Run a command.
+
+ Takes the same arguments as RunCommandGeneric except for return_output.
+ Returns a single value returncode.
+ """
+ # Make sure that args does not overwrite 'return_output'
+ assert len(args) <= 1
+ assert "return_output" not in kwargs
+ kwargs["return_output"] = False
+ return self.RunCommandGeneric(*args, **kwargs)[0]
+
+ def RunCommandWExceptionCleanup(self, *args, **kwargs):
+ """Run a command and kill process if exception is thrown.
+
+ Takes the same arguments as RunCommandGeneric except for except_handler.
+ Returns same as RunCommandGeneric.
+ """
+
+ def KillProc(proc, _):
+ if proc:
+ os.killpg(os.getpgid(proc.pid), signal.SIGTERM)
+
+ # Make sure that args does not overwrite 'except_handler'
+ assert len(args) <= 8
+ assert "except_handler" not in kwargs
+ kwargs["except_handler"] = KillProc
+ return self.RunCommandGeneric(*args, **kwargs)
+
+ def RunCommandWOutput(self, *args, **kwargs):
+ """Run a command.
+
+ Takes the same arguments as RunCommandGeneric except for return_output.
+ Returns a triplet (returncode, stdout, stderr).
+ """
+ # Make sure that args does not overwrite 'return_output'
+ assert len(args) <= 1
+ assert "return_output" not in kwargs
+ kwargs["return_output"] = True
+ return self.RunCommandGeneric(*args, **kwargs)
+
+ def RemoteAccessInitCommand(self, chromeos_root, machine, port=None):
+ command = ""
+ command += "\nset -- --remote=" + machine
+ if port:
+ command += " --ssh_port=" + port
+ command += "\n. " + chromeos_root + "/src/scripts/common.sh"
+ command += "\n. " + chromeos_root + "/src/scripts/remote_access.sh"
+ command += "\nTMP=$(mktemp -d)"
+ command += '\nFLAGS "$@" || exit 1'
+ command += "\nremote_access_init"
+ return command
+
+ def WriteToTempShFile(self, contents):
+ with tempfile.NamedTemporaryFile(
+ "w",
+ encoding="utf-8",
+ delete=False,
+ prefix=os.uname()[1],
+ suffix=".sh",
+ ) as f:
+ f.write("#!/bin/bash\n")
+ f.write(contents)
+ f.flush()
+ return f.name
+
+ def CrosLearnBoard(self, chromeos_root, machine):
+ command = self.RemoteAccessInitCommand(chromeos_root, machine)
+ command += "\nlearn_board"
+ command += "\necho ${FLAGS_board}"
+ retval, output, _ = self.RunCommandWOutput(command)
+ if self.logger:
+ self.logger.LogFatalIf(retval, "learn_board command failed")
+ elif retval:
+ sys.exit(1)
+ return output.split()[-1]
+
+ def CrosRunCommandGeneric(
+ self,
+ cmd,
+ return_output=False,
+ machine=None,
+ command_terminator=None,
+ chromeos_root=None,
+ command_timeout=None,
+ terminated_timeout=10,
+ print_to_console=True,
+ ):
+ """Run a command on a ChromeOS box.
+
+ Returns triplet (returncode, stdout, stderr).
+ """
+
+ if self.log_level != "verbose":
+ print_to_console = False
+
+ if self.logger:
+ self.logger.LogCmd(cmd, print_to_console=print_to_console)
+ self.logger.LogFatalIf(not machine, "No machine provided!")
+ self.logger.LogFatalIf(
+ not chromeos_root, "chromeos_root not given!"
+ )
+ else:
+ if not chromeos_root or not machine:
+ sys.exit(1)
+ chromeos_root = os.path.expanduser(chromeos_root)
+
+ port = None
+ if ":" in machine:
+ machine, port = machine.split(":")
+ # Write all commands to a file.
+ command_file = self.WriteToTempShFile(cmd)
+ retval = self.CopyFiles(
+ command_file,
+ command_file,
+ dest_machine=machine,
+ dest_port=port,
+ command_terminator=command_terminator,
+ chromeos_root=chromeos_root,
+ dest_cros=True,
+ recursive=False,
+ print_to_console=print_to_console,
+ )
+ if retval:
if self.logger:
- self.logger.LogCommandError(err, print_to_console)
- if err == '':
- pipes.remove(p.stderr)
- my_poll.unregister(p.stderr)
-
- if p.poll() is not None:
- if terminated_time is None:
- terminated_time = time.time()
- elif (terminated_timeout is not None
- and time.time() - terminated_time > terminated_timeout):
+ self.logger.LogError(
+ "Could not run remote command on machine."
+ " Is the machine up?"
+ )
+ return (retval, "", "")
+
+ command = self.RemoteAccessInitCommand(chromeos_root, machine, port)
+ command += "\nremote_sh bash %s" % command_file
+ command += '\nl_retval=$?; echo "$REMOTE_OUT"; exit $l_retval'
+ retval = self.RunCommandGeneric(
+ command,
+ return_output,
+ command_terminator=command_terminator,
+ command_timeout=command_timeout,
+ terminated_timeout=terminated_timeout,
+ print_to_console=print_to_console,
+ )
+ if return_output:
+ connect_signature = (
+ "Initiating first contact with remote host\n"
+ + "Connection OK\n"
+ )
+ connect_signature_re = re.compile(connect_signature)
+ modded_retval = list(retval)
+ modded_retval[1] = connect_signature_re.sub("", retval[1])
+ return modded_retval
+ return retval
+
+ def CrosRunCommand(self, *args, **kwargs):
+ """Run a command on a ChromeOS box.
+
+ Takes the same arguments as CrosRunCommandGeneric except for return_output.
+ Returns a single value returncode.
+ """
+ # Make sure that args does not overwrite 'return_output'
+ assert len(args) <= 1
+ assert "return_output" not in kwargs
+ kwargs["return_output"] = False
+ return self.CrosRunCommandGeneric(*args, **kwargs)[0]
+
+ def CrosRunCommandWOutput(self, *args, **kwargs):
+ """Run a command on a ChromeOS box.
+
+ Takes the same arguments as CrosRunCommandGeneric except for return_output.
+ Returns a triplet (returncode, stdout, stderr).
+ """
+ # Make sure that args does not overwrite 'return_output'
+ assert len(args) <= 1
+ assert "return_output" not in kwargs
+ kwargs["return_output"] = True
+ return self.CrosRunCommandGeneric(*args, **kwargs)
+
+ def ChrootRunCommandGeneric(
+ self,
+ chromeos_root,
+ command,
+ return_output=False,
+ command_terminator=None,
+ command_timeout=None,
+ terminated_timeout=10,
+ print_to_console=True,
+ cros_sdk_options="",
+ env=None,
+ ):
+ """Runs a command within the chroot.
+
+ Returns triplet (returncode, stdout, stderr).
+ """
+
+ if self.log_level != "verbose":
+ print_to_console = False
+
+ if self.logger:
+ self.logger.LogCmd(command, print_to_console=print_to_console)
+
+ with tempfile.NamedTemporaryFile(
+ "w",
+ encoding="utf-8",
+ delete=False,
+ dir=os.path.join(chromeos_root, "src/scripts"),
+ suffix=".sh",
+ prefix="in_chroot_cmd",
+ ) as f:
+ f.write("#!/bin/bash\n")
+ f.write(command)
+ f.write("\n")
+ f.flush()
+
+ command_file = f.name
+ os.chmod(command_file, 0o777)
+
+ # if return_output is set, run a test command first to make sure that
+ # the chroot already exists. We want the final returned output to skip
+ # the output from chroot creation steps.
+ if return_output:
+ ret = self.RunCommand(
+ "cd %s; cros_sdk %s -- true"
+ % (chromeos_root, cros_sdk_options),
+ env=env,
+ # Give this command a long time to execute; it might involve setting
+ # the chroot up, or running fstrim on its image file. Both of these
+ # operations can take well over the timeout default of 10 seconds.
+ terminated_timeout=5 * 60,
+ )
+ if ret:
+ return (ret, "", "")
+
+ # Run command_file inside the chroot, making sure that any "~" is expanded
+ # by the shell inside the chroot, not outside.
+ command = "cd %s; cros_sdk %s -- bash -c '%s/%s'" % (
+ chromeos_root,
+ cros_sdk_options,
+ CHROMEOS_SCRIPTS_DIR,
+ os.path.basename(command_file),
+ )
+ ret = self.RunCommandGeneric(
+ command,
+ return_output,
+ command_terminator=command_terminator,
+ command_timeout=command_timeout,
+ terminated_timeout=terminated_timeout,
+ print_to_console=print_to_console,
+ env=env,
+ )
+ os.remove(command_file)
+ return ret
+
+ def ChrootRunCommand(self, *args, **kwargs):
+ """Runs a command within the chroot.
+
+ Takes the same arguments as ChrootRunCommandGeneric except for
+ return_output.
+ Returns a single value returncode.
+ """
+ # Make sure that args does not overwrite 'return_output'
+ assert len(args) <= 2
+ assert "return_output" not in kwargs
+ kwargs["return_output"] = False
+ return self.ChrootRunCommandGeneric(*args, **kwargs)[0]
+
+ def ChrootRunCommandWOutput(self, *args, **kwargs):
+ """Runs a command within the chroot.
+
+ Takes the same arguments as ChrootRunCommandGeneric except for
+ return_output.
+ Returns a triplet (returncode, stdout, stderr).
+ """
+ # Make sure that args does not overwrite 'return_output'
+ assert len(args) <= 2
+ assert "return_output" not in kwargs
+ kwargs["return_output"] = True
+ return self.ChrootRunCommandGeneric(*args, **kwargs)
+
+ def RunCommands(
+ self, cmdlist, machine=None, username=None, command_terminator=None
+ ):
+ cmd = " ;\n".join(cmdlist)
+ return self.RunCommand(
+ cmd,
+ machine=machine,
+ username=username,
+ command_terminator=command_terminator,
+ )
+
+ def CopyFiles(
+ self,
+ src,
+ dest,
+ src_machine=None,
+ src_port=None,
+ dest_machine=None,
+ dest_port=None,
+ src_user=None,
+ dest_user=None,
+ recursive=True,
+ command_terminator=None,
+ chromeos_root=None,
+ src_cros=False,
+ dest_cros=False,
+ print_to_console=True,
+ ):
+ src = os.path.expanduser(src)
+ dest = os.path.expanduser(dest)
+
+ if recursive:
+ src = src + "/"
+ dest = dest + "/"
+
+ if src_cros or dest_cros:
if self.logger:
- self.logger.LogWarning(
- 'Timeout of %s seconds reached since '
- 'process termination.' % terminated_timeout,
- print_to_console)
- break
-
- if (command_timeout is not None
- and time.time() - started_time > command_timeout):
- os.killpg(os.getpgid(p.pid), signal.SIGTERM)
- if self.logger:
- self.logger.LogWarning(
- 'Timeout of %s seconds reached since process'
- 'started. Killed child process group.' % command_timeout,
- print_to_console)
- break
-
- if out == err == '':
- break
-
- p.wait()
- if return_output:
- return (p.returncode, full_stdout, full_stderr)
- return (p.returncode, '', '')
- except BaseException as err:
- except_handler(p, err)
- raise
-
- def RunCommand(self, *args, **kwargs):
- """Run a command.
-
- Takes the same arguments as RunCommandGeneric except for return_output.
- Returns a single value returncode.
- """
- # Make sure that args does not overwrite 'return_output'
- assert len(args) <= 1
- assert 'return_output' not in kwargs
- kwargs['return_output'] = False
- return self.RunCommandGeneric(*args, **kwargs)[0]
-
- def RunCommandWExceptionCleanup(self, *args, **kwargs):
- """Run a command and kill process if exception is thrown.
-
- Takes the same arguments as RunCommandGeneric except for except_handler.
- Returns same as RunCommandGeneric.
- """
-
- def KillProc(proc, _):
- if proc:
- os.killpg(os.getpgid(proc.pid), signal.SIGTERM)
-
- # Make sure that args does not overwrite 'except_handler'
- assert len(args) <= 8
- assert 'except_handler' not in kwargs
- kwargs['except_handler'] = KillProc
- return self.RunCommandGeneric(*args, **kwargs)
-
- def RunCommandWOutput(self, *args, **kwargs):
- """Run a command.
-
- Takes the same arguments as RunCommandGeneric except for return_output.
- Returns a triplet (returncode, stdout, stderr).
- """
- # Make sure that args does not overwrite 'return_output'
- assert len(args) <= 1
- assert 'return_output' not in kwargs
- kwargs['return_output'] = True
- return self.RunCommandGeneric(*args, **kwargs)
-
- def RemoteAccessInitCommand(self, chromeos_root, machine, port=None):
- command = ''
- command += '\nset -- --remote=' + machine
- if port:
- command += ' --ssh_port=' + port
- command += '\n. ' + chromeos_root + '/src/scripts/common.sh'
- command += '\n. ' + chromeos_root + '/src/scripts/remote_access.sh'
- command += '\nTMP=$(mktemp -d)'
- command += '\nFLAGS "$@" || exit 1'
- command += '\nremote_access_init'
- return command
-
- def WriteToTempShFile(self, contents):
- with tempfile.NamedTemporaryFile('w',
- encoding='utf-8',
- delete=False,
- prefix=os.uname()[1],
- suffix='.sh') as f:
- f.write('#!/bin/bash\n')
- f.write(contents)
- f.flush()
- return f.name
-
- def CrosLearnBoard(self, chromeos_root, machine):
- command = self.RemoteAccessInitCommand(chromeos_root, machine)
- command += '\nlearn_board'
- command += '\necho ${FLAGS_board}'
- retval, output, _ = self.RunCommandWOutput(command)
- if self.logger:
- self.logger.LogFatalIf(retval, 'learn_board command failed')
- elif retval:
- sys.exit(1)
- return output.split()[-1]
-
- def CrosRunCommandGeneric(self,
- cmd,
- return_output=False,
- machine=None,
- command_terminator=None,
- chromeos_root=None,
- command_timeout=None,
- terminated_timeout=10,
- print_to_console=True):
- """Run a command on a ChromeOS box.
-
- Returns triplet (returncode, stdout, stderr).
- """
-
- if self.log_level != 'verbose':
- print_to_console = False
-
- if self.logger:
- self.logger.LogCmd(cmd, print_to_console=print_to_console)
- self.logger.LogFatalIf(not machine, 'No machine provided!')
- self.logger.LogFatalIf(not chromeos_root, 'chromeos_root not given!')
- else:
- if not chromeos_root or not machine:
- sys.exit(1)
- chromeos_root = os.path.expanduser(chromeos_root)
-
- port = None
- if ':' in machine:
- machine, port = machine.split(':')
- # Write all commands to a file.
- command_file = self.WriteToTempShFile(cmd)
- retval = self.CopyFiles(command_file,
- command_file,
- dest_machine=machine,
- dest_port=port,
- command_terminator=command_terminator,
- chromeos_root=chromeos_root,
- dest_cros=True,
- recursive=False,
- print_to_console=print_to_console)
- if retval:
- if self.logger:
- self.logger.LogError('Could not run remote command on machine.'
- ' Is the machine up?')
- return (retval, '', '')
-
- command = self.RemoteAccessInitCommand(chromeos_root, machine, port)
- command += '\nremote_sh bash %s' % command_file
- command += '\nl_retval=$?; echo "$REMOTE_OUT"; exit $l_retval'
- retval = self.RunCommandGeneric(command,
- return_output,
- command_terminator=command_terminator,
- command_timeout=command_timeout,
- terminated_timeout=terminated_timeout,
- print_to_console=print_to_console)
- if return_output:
- connect_signature = ('Initiating first contact with remote host\n' +
- 'Connection OK\n')
- connect_signature_re = re.compile(connect_signature)
- modded_retval = list(retval)
- modded_retval[1] = connect_signature_re.sub('', retval[1])
- return modded_retval
- return retval
-
- def CrosRunCommand(self, *args, **kwargs):
- """Run a command on a ChromeOS box.
-
- Takes the same arguments as CrosRunCommandGeneric except for return_output.
- Returns a single value returncode.
- """
- # Make sure that args does not overwrite 'return_output'
- assert len(args) <= 1
- assert 'return_output' not in kwargs
- kwargs['return_output'] = False
- return self.CrosRunCommandGeneric(*args, **kwargs)[0]
-
- def CrosRunCommandWOutput(self, *args, **kwargs):
- """Run a command on a ChromeOS box.
-
- Takes the same arguments as CrosRunCommandGeneric except for return_output.
- Returns a triplet (returncode, stdout, stderr).
- """
- # Make sure that args does not overwrite 'return_output'
- assert len(args) <= 1
- assert 'return_output' not in kwargs
- kwargs['return_output'] = True
- return self.CrosRunCommandGeneric(*args, **kwargs)
-
- def ChrootRunCommandGeneric(self,
- chromeos_root,
- command,
- return_output=False,
- command_terminator=None,
- command_timeout=None,
- terminated_timeout=10,
- print_to_console=True,
- cros_sdk_options='',
- env=None):
- """Runs a command within the chroot.
-
- Returns triplet (returncode, stdout, stderr).
- """
-
- if self.log_level != 'verbose':
- print_to_console = False
-
- if self.logger:
- self.logger.LogCmd(command, print_to_console=print_to_console)
-
- with tempfile.NamedTemporaryFile('w',
- encoding='utf-8',
- delete=False,
- dir=os.path.join(chromeos_root,
- 'src/scripts'),
- suffix='.sh',
- prefix='in_chroot_cmd') as f:
- f.write('#!/bin/bash\n')
- f.write(command)
- f.write('\n')
- f.flush()
-
- command_file = f.name
- os.chmod(command_file, 0o777)
-
- # if return_output is set, run a test command first to make sure that
- # the chroot already exists. We want the final returned output to skip
- # the output from chroot creation steps.
- if return_output:
- ret = self.RunCommand(
- 'cd %s; cros_sdk %s -- true' % (chromeos_root, cros_sdk_options),
- env=env,
- # Give this command a long time to execute; it might involve setting
- # the chroot up, or running fstrim on its image file. Both of these
- # operations can take well over the timeout default of 10 seconds.
- terminated_timeout=5 * 60)
- if ret:
- return (ret, '', '')
-
- # Run command_file inside the chroot, making sure that any "~" is expanded
- # by the shell inside the chroot, not outside.
- command = ("cd %s; cros_sdk %s -- bash -c '%s/%s'" %
- (chromeos_root, cros_sdk_options, CHROMEOS_SCRIPTS_DIR,
- os.path.basename(command_file)))
- ret = self.RunCommandGeneric(command,
- return_output,
- command_terminator=command_terminator,
- command_timeout=command_timeout,
- terminated_timeout=terminated_timeout,
- print_to_console=print_to_console,
- env=env)
- os.remove(command_file)
- return ret
-
- def ChrootRunCommand(self, *args, **kwargs):
- """Runs a command within the chroot.
-
- Takes the same arguments as ChrootRunCommandGeneric except for
- return_output.
- Returns a single value returncode.
- """
- # Make sure that args does not overwrite 'return_output'
- assert len(args) <= 2
- assert 'return_output' not in kwargs
- kwargs['return_output'] = False
- return self.ChrootRunCommandGeneric(*args, **kwargs)[0]
-
- def ChrootRunCommandWOutput(self, *args, **kwargs):
- """Runs a command within the chroot.
-
- Takes the same arguments as ChrootRunCommandGeneric except for
- return_output.
- Returns a triplet (returncode, stdout, stderr).
- """
- # Make sure that args does not overwrite 'return_output'
- assert len(args) <= 2
- assert 'return_output' not in kwargs
- kwargs['return_output'] = True
- return self.ChrootRunCommandGeneric(*args, **kwargs)
-
- def RunCommands(self,
- cmdlist,
- machine=None,
- username=None,
- command_terminator=None):
- cmd = ' ;\n'.join(cmdlist)
- return self.RunCommand(cmd,
- machine=machine,
- username=username,
- command_terminator=command_terminator)
-
- def CopyFiles(self,
+ self.logger.LogFatalIf(
+ src_cros == dest_cros,
+                    "Only one of src_cros and dest_cros can be True.",
+ )
+ self.logger.LogFatalIf(
+ not chromeos_root, "chromeos_root not given!"
+ )
+ elif src_cros == dest_cros or not chromeos_root:
+ sys.exit(1)
+ if src_cros:
+ cros_machine = src_machine
+ cros_port = src_port
+ host_machine = dest_machine
+ host_user = dest_user
+ else:
+ cros_machine = dest_machine
+ cros_port = dest_port
+ host_machine = src_machine
+ host_user = src_user
+
+ command = self.RemoteAccessInitCommand(
+ chromeos_root, cros_machine, cros_port
+ )
+ ssh_command = (
+ "ssh -o StrictHostKeyChecking=no"
+ + " -o UserKnownHostsFile=$(mktemp)"
+ + " -i $TMP_PRIVATE_KEY"
+ )
+ if cros_port:
+ ssh_command += " -p %s" % cros_port
+ rsync_prefix = '\nrsync -r -e "%s" ' % ssh_command
+ if dest_cros:
+ command += rsync_prefix + "%s root@%s:%s" % (
+ src,
+ cros_machine,
+ dest,
+ )
+ else:
+ command += rsync_prefix + "root@%s:%s %s" % (
+ cros_machine,
+ src,
+ dest,
+ )
+
+ return self.RunCommand(
+ command,
+ machine=host_machine,
+ username=host_user,
+ command_terminator=command_terminator,
+ print_to_console=print_to_console,
+ )
+
+ if dest_machine == src_machine:
+ command = "rsync -a %s %s" % (src, dest)
+ else:
+ if src_machine is None:
+ src_machine = os.uname()[1]
+ src_user = getpass.getuser()
+ command = "rsync -a %s@%s:%s %s" % (
+ src_user,
+ src_machine,
src,
dest,
- src_machine=None,
- src_port=None,
- dest_machine=None,
- dest_port=None,
- src_user=None,
- dest_user=None,
- recursive=True,
- command_terminator=None,
- chromeos_root=None,
- src_cros=False,
- dest_cros=False,
- print_to_console=True):
- src = os.path.expanduser(src)
- dest = os.path.expanduser(dest)
-
- if recursive:
- src = src + '/'
- dest = dest + '/'
-
- if src_cros or dest_cros:
- if self.logger:
- self.logger.LogFatalIf(
- src_cros == dest_cros, 'Only one of src_cros and desc_cros can '
- 'be True.')
- self.logger.LogFatalIf(not chromeos_root, 'chromeos_root not given!')
- elif src_cros == dest_cros or not chromeos_root:
- sys.exit(1)
- if src_cros:
- cros_machine = src_machine
- cros_port = src_port
- host_machine = dest_machine
- host_user = dest_user
- else:
- cros_machine = dest_machine
- cros_port = dest_port
- host_machine = src_machine
- host_user = src_user
-
- command = self.RemoteAccessInitCommand(chromeos_root, cros_machine,
- cros_port)
- ssh_command = ('ssh -o StrictHostKeyChecking=no' +
- ' -o UserKnownHostsFile=$(mktemp)' +
- ' -i $TMP_PRIVATE_KEY')
- if cros_port:
- ssh_command += ' -p %s' % cros_port
- rsync_prefix = '\nrsync -r -e "%s" ' % ssh_command
- if dest_cros:
- command += rsync_prefix + '%s root@%s:%s' % (src, cros_machine, dest)
- else:
- command += rsync_prefix + 'root@%s:%s %s' % (cros_machine, src, dest)
-
- return self.RunCommand(command,
- machine=host_machine,
- username=host_user,
- command_terminator=command_terminator,
- print_to_console=print_to_console)
-
- if dest_machine == src_machine:
- command = 'rsync -a %s %s' % (src, dest)
- else:
- if src_machine is None:
- src_machine = os.uname()[1]
- src_user = getpass.getuser()
- command = 'rsync -a %s@%s:%s %s' % (src_user, src_machine, src, dest)
- return self.RunCommand(command,
- machine=dest_machine,
- username=dest_user,
- command_terminator=command_terminator,
- print_to_console=print_to_console)
-
- def RunCommand2(self,
- cmd,
- cwd=None,
- line_consumer=None,
- timeout=None,
- shell=True,
- join_stderr=True,
- env=None,
- except_handler=lambda p, e: None):
- """Run the command with an extra feature line_consumer.
-
- This version allow developers to provide a line_consumer which will be
- fed execution output lines.
-
- A line_consumer is a callback, which is given a chance to run for each
- line the execution outputs (either to stdout or stderr). The
- line_consumer must accept one and exactly one dict argument, the dict
- argument has these items -
- 'line' - The line output by the binary. Notice, this string includes
- the trailing '\n'.
- 'output' - Whether this is a stdout or stderr output, values are either
- 'stdout' or 'stderr'. When join_stderr is True, this value
- will always be 'output'.
- 'pobject' - The object used to control execution, for example, call
- pobject.kill().
-
- Note: As this is written, the stdin for the process executed is
- not associated with the stdin of the caller of this routine.
-
- Args:
- cmd: Command in a single string.
- cwd: Working directory for execution.
- line_consumer: A function that will ba called by this function. See above
- for details.
- timeout: terminate command after this timeout.
- shell: Whether to use a shell for execution.
- join_stderr: Whether join stderr to stdout stream.
- env: Execution environment.
- except_handler: Callback for when exception is thrown during command
- execution. Passed process object and exception.
-
- Returns:
- Execution return code.
-
- Raises:
- child_exception: if fails to start the command process (missing
- permission, no such file, etc)
- """
-
- class StreamHandler(object):
- """Internal utility class."""
-
- def __init__(self, pobject, fd, name, line_consumer):
- self._pobject = pobject
- self._fd = fd
- self._name = name
- self._buf = ''
- self._line_consumer = line_consumer
-
- def read_and_notify_line(self):
- t = os.read(fd, 1024)
- self._buf = self._buf + t
- self.notify_line()
-
- def notify_line(self):
- p = self._buf.find('\n')
- while p >= 0:
- self._line_consumer(line=self._buf[:p + 1],
- output=self._name,
- pobject=self._pobject)
- if p < len(self._buf) - 1:
- self._buf = self._buf[p + 1:]
- p = self._buf.find('\n')
- else:
- self._buf = ''
- p = -1
- break
-
- def notify_eos(self):
- # Notify end of stream. The last line may not end with a '\n'.
- if self._buf != '':
- self._line_consumer(line=self._buf,
- output=self._name,
- pobject=self._pobject)
- self._buf = ''
-
- if self.log_level == 'verbose':
- self.logger.LogCmd(cmd)
- elif self.logger:
- self.logger.LogCmdToFileOnly(cmd)
-
- # We use setsid so that the child will have a different session id
- # and we can easily kill the process group. This is also important
- # because the child will be disassociated from the parent terminal.
- # In this way the child cannot mess the parent's terminal.
- pobject = None
- try:
- # pylint: disable=bad-option-value, subprocess-popen-preexec-fn
- pobject = subprocess.Popen(
- cmd,
- cwd=cwd,
- bufsize=1024,
- env=env,
- shell=shell,
- universal_newlines=True,
- stdout=subprocess.PIPE,
- stderr=subprocess.STDOUT if join_stderr else subprocess.PIPE,
- preexec_fn=os.setsid)
-
- # We provide a default line_consumer
- if line_consumer is None:
- line_consumer = lambda **d: None
- start_time = time.time()
- poll = select.poll()
- outfd = pobject.stdout.fileno()
- poll.register(outfd, select.POLLIN | select.POLLPRI)
- handlermap = {
- outfd: StreamHandler(pobject, outfd, 'stdout', line_consumer)
- }
- if not join_stderr:
- errfd = pobject.stderr.fileno()
- poll.register(errfd, select.POLLIN | select.POLLPRI)
- handlermap[errfd] = StreamHandler(pobject, errfd, 'stderr',
- line_consumer)
- while handlermap:
- readables = poll.poll(300)
- for (fd, evt) in readables:
- handler = handlermap[fd]
- if evt & (select.POLLPRI | select.POLLIN):
- handler.read_and_notify_line()
- elif evt & (select.POLLHUP | select.POLLERR | select.POLLNVAL):
- handler.notify_eos()
- poll.unregister(fd)
- del handlermap[fd]
-
- if timeout is not None and (time.time() - start_time > timeout):
- os.killpg(os.getpgid(pobject.pid), signal.SIGTERM)
-
- return pobject.wait()
- except BaseException as err:
- except_handler(pobject, err)
- raise
+ )
+ return self.RunCommand(
+ command,
+ machine=dest_machine,
+ username=dest_user,
+ command_terminator=command_terminator,
+ print_to_console=print_to_console,
+ )
+
+ def RunCommand2(
+ self,
+ cmd,
+ cwd=None,
+ line_consumer=None,
+ timeout=None,
+ shell=True,
+ join_stderr=True,
+ env=None,
+ except_handler=lambda p, e: None,
+ ):
+ """Run the command with an extra feature line_consumer.
+
+        This version allows developers to provide a line_consumer which will be
+ fed execution output lines.
+
+        A line_consumer is a callback which is given a chance to run for each
+        line the execution outputs (either to stdout or stderr). The
+        line_consumer is invoked with exactly these keyword arguments, so it
+        must accept them (for example via a single **kwargs parameter) -
+ 'line' - The line output by the binary. Notice, this string includes
+ the trailing '\n'.
+          'output' - Whether this is a stdout or stderr output, values are
+            either 'stdout' or 'stderr'. When join_stderr is True, stderr is
+            folded into stdout and this value will always be 'stdout'.
+ 'pobject' - The object used to control execution, for example, call
+ pobject.kill().
+
+ Note: As this is written, the stdin for the process executed is
+ not associated with the stdin of the caller of this routine.
+
+ Args:
+ cmd: Command in a single string.
+ cwd: Working directory for execution.
+          line_consumer: A function that will be called by this function. See above
+ for details.
+          timeout: Terminate the command after this timeout, in seconds.
+ shell: Whether to use a shell for execution.
+          join_stderr: Whether to join stderr into the stdout stream.
+ env: Execution environment.
+ except_handler: Callback for when exception is thrown during command
+ execution. Passed process object and exception.
+
+ Returns:
+ Execution return code.
+
+ Raises:
+          child_exception: If the command process fails to start (missing
+            permission, no such file, etc.).
+ """
+
+ class StreamHandler(object):
+ """Internal utility class."""
+
+ def __init__(self, pobject, fd, name, line_consumer):
+ self._pobject = pobject
+ self._fd = fd
+ self._name = name
+ self._buf = ""
+ self._line_consumer = line_consumer
+
+            def read_and_notify_line(self):
+                # Read from this handler's descriptor; os.read() returns
+                # bytes, so decode before appending to the str buffer.
+                t = os.read(self._fd, 1024).decode("utf-8", errors="replace")
+                self._buf = self._buf + t
+                self.notify_line()
+
+ def notify_line(self):
+ p = self._buf.find("\n")
+ while p >= 0:
+ self._line_consumer(
+ line=self._buf[: p + 1],
+ output=self._name,
+ pobject=self._pobject,
+ )
+ if p < len(self._buf) - 1:
+ self._buf = self._buf[p + 1 :]
+ p = self._buf.find("\n")
+ else:
+ self._buf = ""
+ p = -1
+ break
+
+ def notify_eos(self):
+ # Notify end of stream. The last line may not end with a '\n'.
+ if self._buf != "":
+ self._line_consumer(
+ line=self._buf, output=self._name, pobject=self._pobject
+ )
+ self._buf = ""
+
+ if self.log_level == "verbose":
+ self.logger.LogCmd(cmd)
+ elif self.logger:
+ self.logger.LogCmdToFileOnly(cmd)
+
+ # We use setsid so that the child will have a different session id
+ # and we can easily kill the process group. This is also important
+ # because the child will be disassociated from the parent terminal.
+ # In this way the child cannot mess the parent's terminal.
+ pobject = None
+ try:
+ # pylint: disable=bad-option-value, subprocess-popen-preexec-fn
+ pobject = subprocess.Popen(
+ cmd,
+ cwd=cwd,
+ bufsize=1024,
+ env=env,
+ shell=shell,
+ universal_newlines=True,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.STDOUT if join_stderr else subprocess.PIPE,
+ preexec_fn=os.setsid,
+ )
+
+ # We provide a default line_consumer
+ if line_consumer is None:
+ line_consumer = lambda **d: None
+ start_time = time.time()
+ poll = select.poll()
+ outfd = pobject.stdout.fileno()
+ poll.register(outfd, select.POLLIN | select.POLLPRI)
+ handlermap = {
+ outfd: StreamHandler(pobject, outfd, "stdout", line_consumer)
+ }
+ if not join_stderr:
+ errfd = pobject.stderr.fileno()
+ poll.register(errfd, select.POLLIN | select.POLLPRI)
+ handlermap[errfd] = StreamHandler(
+ pobject, errfd, "stderr", line_consumer
+ )
+ while handlermap:
+ readables = poll.poll(300)
+ for (fd, evt) in readables:
+ handler = handlermap[fd]
+ if evt & (select.POLLPRI | select.POLLIN):
+ handler.read_and_notify_line()
+ elif evt & (
+ select.POLLHUP | select.POLLERR | select.POLLNVAL
+ ):
+ handler.notify_eos()
+ poll.unregister(fd)
+ del handlermap[fd]
+
+ if timeout is not None and (time.time() - start_time > timeout):
+ os.killpg(os.getpgid(pobject.pid), signal.SIGTERM)
+
+ return pobject.wait()
+ except BaseException as err:
+ except_handler(pobject, err)
+ raise
class MockCommandExecuter(CommandExecuter):
- """Mock class for class CommandExecuter."""
-
- def RunCommandGeneric(self,
- cmd,
- return_output=False,
- machine=None,
- username=None,
- command_terminator=None,
- command_timeout=None,
- terminated_timeout=10,
- print_to_console=True,
- env=None,
- except_handler=lambda p, e: None):
- assert not command_timeout
- cmd = str(cmd)
- if machine is None:
- machine = 'localhost'
- if username is None:
- username = 'current'
- logger.GetLogger().LogCmd('(Mock) ' + cmd, machine, username,
- print_to_console)
- return (0, '', '')
-
- def RunCommand(self, *args, **kwargs):
- assert 'return_output' not in kwargs
- kwargs['return_output'] = False
- return self.RunCommandGeneric(*args, **kwargs)[0]
-
- def RunCommandWOutput(self, *args, **kwargs):
- assert 'return_output' not in kwargs
- kwargs['return_output'] = True
- return self.RunCommandGeneric(*args, **kwargs)
+ """Mock class for class CommandExecuter."""
+
+ def RunCommandGeneric(
+ self,
+ cmd,
+ return_output=False,
+ machine=None,
+ username=None,
+ command_terminator=None,
+ command_timeout=None,
+ terminated_timeout=10,
+ print_to_console=True,
+ env=None,
+ except_handler=lambda p, e: None,
+ ):
+ assert not command_timeout
+ cmd = str(cmd)
+ if machine is None:
+ machine = "localhost"
+ if username is None:
+ username = "current"
+ logger.GetLogger().LogCmd(
+ "(Mock) " + cmd, machine, username, print_to_console
+ )
+ return (0, "", "")
+
+ def RunCommand(self, *args, **kwargs):
+ assert "return_output" not in kwargs
+ kwargs["return_output"] = False
+ return self.RunCommandGeneric(*args, **kwargs)[0]
+
+ def RunCommandWOutput(self, *args, **kwargs):
+ assert "return_output" not in kwargs
+ kwargs["return_output"] = True
+ return self.RunCommandGeneric(*args, **kwargs)
class CommandTerminator(object):
- """Object to request termination of a command in execution."""
+ """Object to request termination of a command in execution."""
- def __init__(self):
- self.terminated = False
+ def __init__(self):
+ self.terminated = False
- def Terminate(self):
- self.terminated = True
+ def Terminate(self):
+ self.terminated = True
- def IsTerminated(self):
- return self.terminated
+ def IsTerminated(self):
+ return self.terminated
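
Usage sketch (illustrative only, not part of the change above): one way the
line_consumer callback of RunCommand2 might be driven. The collect_line helper,
the captured list, and the echoed shell command are hypothetical; only
GetCommandExecuter and RunCommand2 come from the module.

    from cros_utils import command_executer

    ce = command_executer.GetCommandExecuter()
    captured = []

    def collect_line(line, output, pobject):
        # Called once per output line; 'line' keeps its trailing '\n' and
        # 'output' names the stream ('stdout' or 'stderr' when join_stderr
        # is False). 'pobject' can be used to stop the run, e.g. pobject.kill().
        captured.append((output, line.rstrip("\n")))

    retcode = ce.RunCommand2(
        "echo out; echo err >&2", line_consumer=collect_line, join_stderr=False
    )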
diff --git a/cros_utils/command_executer_timeout_test.py b/cros_utils/command_executer_timeout_test.py
index 1c9c74cd..3af9bd3e 100755
--- a/cros_utils/command_executer_timeout_test.py
+++ b/cros_utils/command_executer_timeout_test.py
@@ -1,15 +1,14 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
-# Copyright 2020 The Chromium OS Authors. All rights reserved.
+# Copyright 2020 The ChromiumOS Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Timeout test for command_executer."""
-from __future__ import print_function
-__author__ = 'asharif@google.com (Ahmad Sharif)'
+__author__ = "asharif@google.com (Ahmad Sharif)"
import argparse
import sys
@@ -18,20 +17,20 @@ from cros_utils import command_executer
def Usage(parser, message):
- print('ERROR: %s' % message)
- parser.print_help()
- sys.exit(0)
+ print("ERROR: %s" % message)
+ parser.print_help()
+ sys.exit(0)
def Main(argv):
- parser = argparse.ArgumentParser()
- _ = parser.parse_args(argv)
+ parser = argparse.ArgumentParser()
+ _ = parser.parse_args(argv)
- command = 'sleep 1000'
- ce = command_executer.GetCommandExecuter()
- ce.RunCommand(command, command_timeout=1)
- return 0
+ command = "sleep 1000"
+ ce = command_executer.GetCommandExecuter()
+ ce.RunCommand(command, command_timeout=1)
+ return 0
-if __name__ == '__main__':
- Main(sys.argv[1:])
+if __name__ == "__main__":
+ Main(sys.argv[1:])
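
The timeout test above stops a long-running command via command_timeout;
CommandTerminator (defined in command_executer.py above) offers a cooperative
alternative. A minimal sketch, assuming the executer polls IsTerminated() while
the child runs; the threading wrapper and the sleep command are illustrative.

    import threading

    from cros_utils import command_executer

    ce = command_executer.GetCommandExecuter()
    terminator = command_executer.CommandTerminator()

    worker = threading.Thread(
        target=ce.RunCommand,
        args=("sleep 1000",),
        kwargs={"command_terminator": terminator},
    )
    worker.start()
    terminator.Terminate()  # Ask the executer to stop the running command.
    worker.join()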
diff --git a/cros_utils/command_executer_unittest.py b/cros_utils/command_executer_unittest.py
index 22331ae0..7cd46a71 100755
--- a/cros_utils/command_executer_unittest.py
+++ b/cros_utils/command_executer_unittest.py
@@ -1,12 +1,11 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
-# Copyright 2019 The Chromium OS Authors. All rights reserved.
+# Copyright 2019 The ChromiumOS Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Unittest for command_executer.py."""
-from __future__ import print_function
import time
import unittest
@@ -15,18 +14,20 @@ from cros_utils import command_executer
class CommandExecuterTest(unittest.TestCase):
- """Test for CommandExecuter class."""
-
- def testTimeout(self):
- timeout = 1
- logging_level = 'average'
- ce = command_executer.CommandExecuter(logging_level)
- start = time.time()
- command = 'sleep 20'
- ce.RunCommand(command, command_timeout=timeout, terminated_timeout=timeout)
- end = time.time()
- self.assertTrue(round(end - start) == timeout)
-
-
-if __name__ == '__main__':
- unittest.main()
+ """Test for CommandExecuter class."""
+
+ def testTimeout(self):
+ timeout = 1
+ logging_level = "average"
+ ce = command_executer.CommandExecuter(logging_level)
+ start = time.time()
+ command = "sleep 20"
+ ce.RunCommand(
+ command, command_timeout=timeout, terminated_timeout=timeout
+ )
+ end = time.time()
+ self.assertTrue(round(end - start) == timeout)
+
+
+if __name__ == "__main__":
+ unittest.main()
diff --git a/cros_utils/constants.py b/cros_utils/constants.py
index b12175bb..47c16686 100644
--- a/cros_utils/constants.py
+++ b/cros_utils/constants.py
@@ -1,14 +1,14 @@
# -*- coding: utf-8 -*-
-# Copyright 2019 The Chromium OS Authors. All rights reserved.
+# Copyright 2019 The ChromiumOS Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Generic constants used accross modules.
"""
-__author__ = 'shenhan@google.com (Han Shen)'
+__author__ = "shenhan@google.com (Han Shen)"
-MOUNTED_TOOLCHAIN_ROOT = '/usr/local/toolchain_root'
+MOUNTED_TOOLCHAIN_ROOT = "/usr/local/toolchain_root"
# Root directory for night testing run.
-CROSTC_WORKSPACE = '/usr/local/google/crostc'
+CROSTC_WORKSPACE = "/usr/local/google/crostc"
diff --git a/cros_utils/device_setup_utils.py b/cros_utils/device_setup_utils.py
index 61dbba27..443c6474 100644
--- a/cros_utils/device_setup_utils.py
+++ b/cros_utils/device_setup_utils.py
@@ -1,6 +1,6 @@
# -*- coding: utf-8 -*-
#
-# Copyright 2019 The Chromium OS Authors. All rights reserved.
+# Copyright 2019 The ChromiumOS Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
@@ -9,529 +9,607 @@
This script provides utils to set device specs.
"""
-from __future__ import division
-from __future__ import print_function
-__author__ = 'zhizhouy@google.com (Zhizhou Yang)'
+__author__ = "zhizhouy@google.com (Zhizhou Yang)"
+from contextlib import contextmanager
import re
import time
-from contextlib import contextmanager
-
from cros_utils import command_executer
class DutWrapper(object):
- """Wrap DUT parameters inside."""
-
- def __init__(self,
- chromeos_root,
- remote,
- log_level='verbose',
- logger=None,
- ce=None,
- dut_config=None):
- self.chromeos_root = chromeos_root
- self.remote = remote
- self.log_level = log_level
- self.logger = logger
- self.ce = ce or command_executer.GetCommandExecuter(log_level=log_level)
- self.dut_config = dut_config
-
- def RunCommandOnDut(self, command, ignore_status=False):
- """Helper function to run command on DUT."""
- ret, msg, err_msg = self.ce.CrosRunCommandWOutput(
- command, machine=self.remote, chromeos_root=self.chromeos_root)
-
- if ret:
- err_msg = ('Command execution on DUT %s failed.\n'
- 'Failing command: %s\n'
- 'returned %d\n'
- 'Error message: %s' % (self.remote, command, ret, err_msg))
- if ignore_status:
- self.logger.LogError(err_msg +
- '\n(Failure is considered non-fatal. Continue.)')
- else:
- self.logger.LogFatal(err_msg)
-
- return ret, msg, err_msg
-
- def DisableASLR(self):
- """Disable ASLR on DUT."""
- disable_aslr = ('set -e; '
- 'if [[ -e /proc/sys/kernel/randomize_va_space ]]; then '
- ' echo 0 > /proc/sys/kernel/randomize_va_space; '
- 'fi')
- if self.log_level == 'average':
- self.logger.LogOutput('Disable ASLR.')
- self.RunCommandOnDut(disable_aslr)
-
- def SetCpuGovernor(self, governor, ignore_status=False):
- """Setup CPU Governor on DUT."""
- set_gov_cmd = (
- 'for f in `ls -d /sys/devices/system/cpu/cpu*/cpufreq 2>/dev/null`; do '
- # Skip writing scaling_governor if cpu is offline.
- ' [[ -e ${f/cpufreq/online} ]] && grep -q 0 ${f/cpufreq/online} '
- ' && continue; '
- ' cd $f; '
- ' if [[ -e scaling_governor ]]; then '
- ' echo %s > scaling_governor; fi; '
- 'done; ')
- if self.log_level == 'average':
- self.logger.LogOutput('Setup CPU Governor: %s.' % governor)
- ret, _, _ = self.RunCommandOnDut(
- set_gov_cmd % governor, ignore_status=ignore_status)
- return ret
-
- def DisableTurbo(self):
- """Disable Turbo on DUT."""
- dis_turbo_cmd = (
- 'if [[ -e /sys/devices/system/cpu/intel_pstate/no_turbo ]]; then '
- ' if grep -q 0 /sys/devices/system/cpu/intel_pstate/no_turbo; then '
- ' echo -n 1 > /sys/devices/system/cpu/intel_pstate/no_turbo; '
- ' fi; '
- 'fi; ')
- if self.log_level == 'average':
- self.logger.LogOutput('Disable Turbo.')
- self.RunCommandOnDut(dis_turbo_cmd)
-
- def SetupCpuUsage(self):
- """Setup CPU usage.
-
- Based on self.dut_config['cpu_usage'] configure CPU cores
- utilization.
- """
-
- if (self.dut_config['cpu_usage'] == 'big_only' or
- self.dut_config['cpu_usage'] == 'little_only'):
- _, arch, _ = self.RunCommandOnDut('uname -m')
-
- if arch.lower().startswith('arm') or arch.lower().startswith('aarch64'):
- self.SetupArmCores()
-
- def SetupArmCores(self):
- """Setup ARM big/little cores."""
-
- # CPU implemeters/part numbers of big/LITTLE CPU.
- # Format: dict(CPU implementer: set(CPU part numbers))
- LITTLE_CORES = {
- '0x41': {
- '0xd01', # Cortex A32
- '0xd03', # Cortex A53
- '0xd04', # Cortex A35
- '0xd05', # Cortex A55
- },
- }
- BIG_CORES = {
- '0x41': {
- '0xd07', # Cortex A57
- '0xd08', # Cortex A72
- '0xd09', # Cortex A73
- '0xd0a', # Cortex A75
- '0xd0b', # Cortex A76
- },
- }
-
- # Values of CPU Implementer and CPU part number are exposed by cpuinfo.
- # Format:
- # =================
- # processor : 0
- # model name : ARMv8 Processor rev 4 (v8l)
- # BogoMIPS : 48.00
- # Features : half thumb fastmult vfp edsp neon vfpv3 tls vfpv4
- # CPU implementer : 0x41
- # CPU architecture: 8
- # CPU variant : 0x0
- # CPU part : 0xd03
- # CPU revision : 4
-
- _, cpuinfo, _ = self.RunCommandOnDut('cat /proc/cpuinfo')
-
- # List of all CPU cores: 0, 1, ..
- proc_matches = re.findall(r'^processor\s*: (\d+)$', cpuinfo, re.MULTILINE)
- # List of all corresponding CPU implementers
- impl_matches = re.findall(r'^CPU implementer\s*: (0x[\da-f]+)$', cpuinfo,
- re.MULTILINE)
- # List of all corresponding CPU part numbers
- part_matches = re.findall(r'^CPU part\s*: (0x[\da-f]+)$', cpuinfo,
- re.MULTILINE)
- assert len(proc_matches) == len(impl_matches)
- assert len(part_matches) == len(impl_matches)
-
- all_cores = set(proc_matches)
- dut_big_cores = {
- core
- for core, impl, part in zip(proc_matches, impl_matches, part_matches)
- if impl in BIG_CORES and part in BIG_CORES[impl]
- }
- dut_lit_cores = {
- core
- for core, impl, part in zip(proc_matches, impl_matches, part_matches)
- if impl in LITTLE_CORES and part in LITTLE_CORES[impl]
- }
-
- if self.dut_config['cpu_usage'] == 'big_only':
- cores_to_enable = dut_big_cores
- cores_to_disable = all_cores - dut_big_cores
- elif self.dut_config['cpu_usage'] == 'little_only':
- cores_to_enable = dut_lit_cores
- cores_to_disable = all_cores - dut_lit_cores
- else:
- self.logger.LogError(
- 'cpu_usage=%s is not supported on ARM.\n'
- 'Ignore ARM CPU setup and continue.' % self.dut_config['cpu_usage'])
- return
-
- if cores_to_enable:
- cmd_enable_cores = ('echo 1 | tee /sys/devices/system/cpu/cpu{%s}/online'
- % ','.join(sorted(cores_to_enable)))
-
- cmd_disable_cores = ''
- if cores_to_disable:
- cmd_disable_cores = (
- 'echo 0 | tee /sys/devices/system/cpu/cpu{%s}/online' % ','.join(
- sorted(cores_to_disable)))
-
- self.RunCommandOnDut('; '.join([cmd_enable_cores, cmd_disable_cores]))
- else:
- # If there are no cores enabled by dut_config then configuration
- # is invalid for current platform and should be ignored.
- self.logger.LogError(
- '"cpu_usage" is invalid for targeted platform.\n'
- 'dut_config[cpu_usage]=%s\n'
- 'dut big cores: %s\n'
- 'dut little cores: %s\n'
- 'Ignore ARM CPU setup and continue.' % (self.dut_config['cpu_usage'],
- dut_big_cores, dut_lit_cores))
-
- def GetCpuOnline(self):
- """Get online status of CPU cores.
-
- Return dict of {int(cpu_num): <0|1>}.
- """
- get_cpu_online_cmd = ('paste -d" "'
- ' <(ls /sys/devices/system/cpu/cpu*/online)'
- ' <(cat /sys/devices/system/cpu/cpu*/online)')
- _, online_output_str, _ = self.RunCommandOnDut(get_cpu_online_cmd)
-
- # Here is the output we expect to see:
- # -----------------
- # /sys/devices/system/cpu/cpu0/online 0
- # /sys/devices/system/cpu/cpu1/online 1
-
- cpu_online = {}
- cpu_online_match = re.compile(r'^[/\S]+/cpu(\d+)/[/\S]+\s+(\d+)$')
- for line in online_output_str.splitlines():
- match = cpu_online_match.match(line)
- if match:
- cpu = int(match.group(1))
- status = int(match.group(2))
- cpu_online[cpu] = status
- # At least one CPU has to be online.
- assert cpu_online
-
- return cpu_online
-
- def SetupCpuFreq(self, online_cores):
- """Setup CPU frequency.
-
- Based on self.dut_config['cpu_freq_pct'] setup frequency of online CPU cores
- to a supported value which is less or equal to (freq_pct * max_freq / 100)
- limited by min_freq.
-
- NOTE: scaling_available_frequencies support is required.
- Otherwise the function has no effect.
- """
- freq_percent = self.dut_config['cpu_freq_pct']
- list_all_avail_freq_cmd = ('ls /sys/devices/system/cpu/cpu{%s}/cpufreq/'
- 'scaling_available_frequencies')
- # Ignore error to support general usage of frequency setup.
- # Not all platforms support scaling_available_frequencies.
- ret, all_avail_freq_str, _ = self.RunCommandOnDut(
- list_all_avail_freq_cmd % ','.join(str(core) for core in online_cores),
- ignore_status=True)
- if ret or not all_avail_freq_str:
- # No scalable frequencies available for the core.
- return ret
- for avail_freq_path in all_avail_freq_str.split():
- # Get available freq from every scaling_available_frequency path.
- # Error is considered fatal in self.RunCommandOnDut().
- _, avail_freq_str, _ = self.RunCommandOnDut('cat ' + avail_freq_path)
- assert avail_freq_str
-
- all_avail_freq = sorted(
- int(freq_str) for freq_str in avail_freq_str.split())
- min_freq = all_avail_freq[0]
- max_freq = all_avail_freq[-1]
- # Calculate the frequency we are targeting.
- target_freq = round(max_freq * freq_percent / 100)
- # More likely it's not in the list of supported frequencies
- # and our goal is to find the one which is less or equal.
- # Default is min and we will try to maximize it.
- avail_ngt_target = min_freq
- # Find the largest not greater than the target.
- for next_largest in reversed(all_avail_freq):
- if next_largest <= target_freq:
- avail_ngt_target = next_largest
- break
-
- max_freq_path = avail_freq_path.replace('scaling_available_frequencies',
- 'scaling_max_freq')
- min_freq_path = avail_freq_path.replace('scaling_available_frequencies',
- 'scaling_min_freq')
- # With default ignore_status=False we expect 0 status or Fatal error.
- self.RunCommandOnDut('echo %s | tee %s %s' %
- (avail_ngt_target, max_freq_path, min_freq_path))
-
- def WaitCooldown(self):
- """Wait for DUT to cool down to certain temperature."""
- waittime = 0
- timeout_in_sec = int(self.dut_config['cooldown_time']) * 60
- # Temperature from sensors come in uCelsius units.
- temp_in_ucels = int(self.dut_config['cooldown_temp']) * 1000
- sleep_interval = 30
-
- # Wait until any of two events occurs:
- # 1. CPU cools down to a specified temperature.
- # 2. Timeout cooldown_time expires.
- # For the case when targeted temperature is not reached within specified
- # timeout the benchmark is going to start with higher initial CPU temp.
- # In the worst case it may affect test results but at the same time we
- # guarantee the upper bound of waiting time.
- # TODO(denik): Report (or highlight) "high" CPU temperature in test results.
- # "high" should be calculated based on empirical data per platform.
- # Based on such reports we can adjust CPU configuration or
- # cooldown limits accordingly.
- while waittime < timeout_in_sec:
- _, temp_output, _ = self.RunCommandOnDut(
- 'cat /sys/class/thermal/thermal_zone*/temp', ignore_status=True)
- if any(int(temp) > temp_in_ucels for temp in temp_output.split()):
- time.sleep(sleep_interval)
- waittime += sleep_interval
- else:
- # Exit the loop when:
- # 1. Reported temp numbers from all thermal sensors do not exceed
- # 'cooldown_temp' or
- # 2. No data from the sensors.
- break
-
- self.logger.LogOutput('Cooldown wait time: %.1f min' % (waittime / 60))
- return waittime
-
- def DecreaseWaitTime(self):
- """Change the ten seconds wait time for pagecycler to two seconds."""
- FILE = '/usr/local/telemetry/src/tools/perf/page_sets/page_cycler_story.py'
- ret = self.RunCommandOnDut('ls ' + FILE)
-
- if not ret:
- sed_command = 'sed -i "s/_TTI_WAIT_TIME = 10/_TTI_WAIT_TIME = 2/g" '
- self.RunCommandOnDut(sed_command + FILE)
-
- def StopUI(self):
- """Stop UI on DUT."""
- # Added "ignore_status" for the case when crosperf stops ui service which
- # was already stopped. Command is going to fail with 1.
- self.RunCommandOnDut('stop ui', ignore_status=True)
-
- def StartUI(self):
- """Start UI on DUT."""
- # Similar to StopUI, `start ui` fails if the service is already started.
- self.RunCommandOnDut('start ui', ignore_status=True)
-
- def KerncmdUpdateNeeded(self, intel_pstate):
- """Check whether kernel cmdline update is needed.
-
- Args:
- intel_pstate: kernel command line argument (active, passive, no_hwp)
-
- Returns:
- True if update is needed.
- """
-
- good = 0
-
- # Check that dut platform supports hwp
- cmd = "grep -q '^flags.*hwp' /proc/cpuinfo"
- ret_code, _, _ = self.RunCommandOnDut(cmd, ignore_status=True)
- if ret_code != good:
- # Intel hwp is not supported, update is not needed.
- return False
-
- kern_cmdline_cmd = 'grep -q "intel_pstate=%s" /proc/cmdline' % intel_pstate
- ret_code, _, _ = self.RunCommandOnDut(kern_cmdline_cmd, ignore_status=True)
- self.logger.LogOutput('grep /proc/cmdline returned %d' % ret_code)
- if (intel_pstate and ret_code == good or
- not intel_pstate and ret_code != good):
- # No need to updated cmdline if:
- # 1. We are setting intel_pstate and we found it is already set.
- # 2. Not using intel_pstate and it is not in cmdline.
- return False
-
- # Otherwise we need to update intel_pstate.
- return True
-
- def UpdateKerncmdIntelPstate(self, intel_pstate):
- """Update kernel command line.
-
- Args:
- intel_pstate: kernel command line argument (active, passive, no_hwp)
- """
-
- good = 0
-
- # First phase is to remove rootfs verification to allow cmdline change.
- remove_verif_cmd = ' '.join([
- '/usr/share/vboot/bin/make_dev_ssd.sh',
- '--remove_rootfs_verification',
- '--partition %d',
- ])
- # Command for partition 2.
- verif_part2_failed, _, _ = self.RunCommandOnDut(
- remove_verif_cmd % 2, ignore_status=True)
- # Command for partition 4
- # Some machines in the lab use partition 4 to boot from,
- # so cmdline should be update for both partitions.
- verif_part4_failed, _, _ = self.RunCommandOnDut(
- remove_verif_cmd % 4, ignore_status=True)
- if verif_part2_failed or verif_part4_failed:
- self.logger.LogFatal(
- 'ERROR. Failed to update kernel cmdline on partition %d.\n'
- 'Remove verification failed with status %d' %
- (2 if verif_part2_failed else 4, verif_part2_failed or
- verif_part4_failed))
-
- self.RunCommandOnDut('reboot && exit')
- # Give enough time for dut to complete reboot
- # TODO(denik): Replace with the function checking machine availability.
- time.sleep(30)
-
- # Second phase to update intel_pstate in kernel cmdline.
- kern_cmdline = '\n'.join([
- 'tmpfile=$(mktemp)',
- 'partnumb=%d',
- 'pstate=%s',
- # Store kernel cmdline in a temp file.
- '/usr/share/vboot/bin/make_dev_ssd.sh --partition ${partnumb}'
- ' --save_config ${tmpfile}',
- # Remove intel_pstate argument if present.
- "sed -i -r 's/ intel_pstate=[A-Za-z_]+//g' ${tmpfile}.${partnumb}",
- # Insert intel_pstate with a new value if it is set.
- '[[ -n ${pstate} ]] &&'
- ' sed -i -e \"s/ *$/ intel_pstate=${pstate}/\" ${tmpfile}.${partnumb}',
- # Save the change in kernel cmdline.
- # After completion we have to reboot.
- '/usr/share/vboot/bin/make_dev_ssd.sh --partition ${partnumb}'
- ' --set_config ${tmpfile}'
- ])
- kern_part2_cmdline_cmd = kern_cmdline % (2, intel_pstate)
- self.logger.LogOutput(
- 'Command to change kernel command line: %s' % kern_part2_cmdline_cmd)
- upd_part2_failed, _, _ = self.RunCommandOnDut(
- kern_part2_cmdline_cmd, ignore_status=True)
- # Again here we are updating cmdline for partition 4
- # in addition to partition 2. Without this some machines
- # in the lab might fail.
- kern_part4_cmdline_cmd = kern_cmdline % (4, intel_pstate)
- self.logger.LogOutput(
- 'Command to change kernel command line: %s' % kern_part4_cmdline_cmd)
- upd_part4_failed, _, _ = self.RunCommandOnDut(
- kern_part4_cmdline_cmd, ignore_status=True)
- if upd_part2_failed or upd_part4_failed:
- self.logger.LogFatal(
- 'ERROR. Failed to update kernel cmdline on partition %d.\n'
- 'intel_pstate update failed with status %d' %
- (2 if upd_part2_failed else 4, upd_part2_failed or upd_part4_failed))
-
- self.RunCommandOnDut('reboot && exit')
- # Wait 30s after reboot.
- time.sleep(30)
-
- # Verification phase.
- # Check that cmdline was updated.
- # Throw an exception if not.
- kern_cmdline_cmd = 'grep -q "intel_pstate=%s" /proc/cmdline' % intel_pstate
- ret_code, _, _ = self.RunCommandOnDut(kern_cmdline_cmd, ignore_status=True)
- if (intel_pstate and ret_code != good or
- not intel_pstate and ret_code == good):
- # Kernel cmdline doesn't match input intel_pstate.
- self.logger.LogFatal(
- 'ERROR. Failed to update kernel cmdline. '
- 'Final verification failed with status %d' % ret_code)
-
- self.logger.LogOutput('Kernel cmdline updated successfully.')
-
- @contextmanager
- def PauseUI(self):
- """Stop UI before and Start UI after the context block.
-
- Context manager will make sure UI is always resumed at the end.
- """
- self.StopUI()
- try:
- yield
-
- finally:
- self.StartUI()
-
- def SetupDevice(self):
- """Setup device to get it ready for testing.
-
- @Returns Wait time of cool down for this benchmark run.
- """
- self.logger.LogOutput('Update kernel cmdline if necessary and reboot')
- intel_pstate = self.dut_config['intel_pstate']
- if intel_pstate and self.KerncmdUpdateNeeded(intel_pstate):
- self.UpdateKerncmdIntelPstate(intel_pstate)
-
- wait_time = 0
- # Pause UI while configuring the DUT.
- # This will accelerate setup (waiting for cooldown has x10 drop)
- # and help to reset a Chrome state left after the previous test.
- with self.PauseUI():
- # Unless the user turns on ASLR in the flag, we first disable ASLR
- # before running the benchmarks
- if not self.dut_config['enable_aslr']:
- self.DisableASLR()
-
- # CPU usage setup comes first where we enable/disable cores.
- self.SetupCpuUsage()
- cpu_online_status = self.GetCpuOnline()
- # List of online cores of type int (core number).
- online_cores = [
- core for core, status in cpu_online_status.items() if status
- ]
- if self.dut_config['cooldown_time']:
- # Setup power conservative mode for effective cool down.
- # Set ignore status since powersave may no be available
- # on all platforms and we are going to handle it.
- ret = self.SetCpuGovernor('powersave', ignore_status=True)
+ """Wrap DUT parameters inside."""
+
+ def __init__(
+ self,
+ chromeos_root,
+ remote,
+ log_level="verbose",
+ logger=None,
+ ce=None,
+ dut_config=None,
+ ):
+ self.chromeos_root = chromeos_root
+ self.remote = remote
+ self.log_level = log_level
+ self.logger = logger
+ self.ce = ce or command_executer.GetCommandExecuter(log_level=log_level)
+ self.dut_config = dut_config
+
+ def RunCommandOnDut(self, command, ignore_status=False):
+ """Helper function to run command on DUT."""
+ ret, msg, err_msg = self.ce.CrosRunCommandWOutput(
+ command, machine=self.remote, chromeos_root=self.chromeos_root
+ )
+
if ret:
- # "powersave" is not available, use "ondemand".
- # Still not a fatal error if it fails.
- ret = self.SetCpuGovernor('ondemand', ignore_status=True)
- # TODO(denik): Run comparison test for 'powersave' and 'ondemand'
- # on scarlet and kevin64.
- # We might have to consider reducing freq manually to the min
- # if it helps to reduce waiting time.
- wait_time = self.WaitCooldown()
-
- # Setup CPU governor for the benchmark run.
- # It overwrites the previous governor settings.
- governor = self.dut_config['governor']
- # FIXME(denik): Pass online cores to governor setup.
- self.SetCpuGovernor(governor)
-
- # Disable Turbo and Setup CPU freq should ALWAYS proceed governor setup
- # since governor may change:
- # - frequency;
- # - turbo/boost.
- self.DisableTurbo()
- self.SetupCpuFreq(online_cores)
-
- self.DecreaseWaitTime()
- # FIXME(denik): Currently we are not recovering the previous cpufreq
- # settings since we do reboot/setup every time anyway.
- # But it may change in the future and then we have to recover the
- # settings.
- return wait_time
+ err_msg = (
+ "Command execution on DUT %s failed.\n"
+ "Failing command: %s\n"
+ "returned %d\n"
+ "Error message: %s" % (self.remote, command, ret, err_msg)
+ )
+ if ignore_status:
+ self.logger.LogError(
+ err_msg + "\n(Failure is considered non-fatal. Continue.)"
+ )
+ else:
+ self.logger.LogFatal(err_msg)
+
+ return ret, msg, err_msg
+
+ def DisableASLR(self):
+ """Disable ASLR on DUT."""
+ disable_aslr = (
+ "set -e; "
+ "if [[ -e /proc/sys/kernel/randomize_va_space ]]; then "
+ " echo 0 > /proc/sys/kernel/randomize_va_space; "
+ "fi"
+ )
+ if self.log_level == "average":
+ self.logger.LogOutput("Disable ASLR.")
+ self.RunCommandOnDut(disable_aslr)
+
+ def SetCpuGovernor(self, governor, ignore_status=False):
+ """Setup CPU Governor on DUT."""
+ set_gov_cmd = (
+ "for f in `ls -d /sys/devices/system/cpu/cpu*/cpufreq 2>/dev/null`; do "
+ # Skip writing scaling_governor if cpu is offline.
+ " [[ -e ${f/cpufreq/online} ]] && grep -q 0 ${f/cpufreq/online} "
+ " && continue; "
+ " cd $f; "
+ " if [[ -e scaling_governor ]]; then "
+ " echo %s > scaling_governor; fi; "
+ "done; "
+ )
+ if self.log_level == "average":
+ self.logger.LogOutput("Setup CPU Governor: %s." % governor)
+ ret, _, _ = self.RunCommandOnDut(
+ set_gov_cmd % governor, ignore_status=ignore_status
+ )
+ return ret
+
+ def DisableTurbo(self):
+ """Disable Turbo on DUT."""
+ dis_turbo_cmd = (
+ "if [[ -e /sys/devices/system/cpu/intel_pstate/no_turbo ]]; then "
+ " if grep -q 0 /sys/devices/system/cpu/intel_pstate/no_turbo; then "
+ " echo -n 1 > /sys/devices/system/cpu/intel_pstate/no_turbo; "
+ " fi; "
+ "fi; "
+ )
+ if self.log_level == "average":
+ self.logger.LogOutput("Disable Turbo.")
+ self.RunCommandOnDut(dis_turbo_cmd)
+
+ def SetupCpuUsage(self):
+ """Setup CPU usage.
+
+        Based on self.dut_config['cpu_usage'], configure CPU core
+        utilization.
+ """
+
+ if (
+ self.dut_config["cpu_usage"] == "big_only"
+ or self.dut_config["cpu_usage"] == "little_only"
+ ):
+ _, arch, _ = self.RunCommandOnDut("uname -m")
+
+ if arch.lower().startswith("arm") or arch.lower().startswith(
+ "aarch64"
+ ):
+ self.SetupArmCores()
+
+ def SetupArmCores(self):
+ """Setup ARM big/little cores."""
+
+        # CPU implementers/part numbers of big/LITTLE CPUs.
+ # Format: dict(CPU implementer: set(CPU part numbers))
+ LITTLE_CORES = {
+ "0x41": {
+ "0xd01", # Cortex A32
+ "0xd03", # Cortex A53
+ "0xd04", # Cortex A35
+ "0xd05", # Cortex A55
+ },
+ }
+ BIG_CORES = {
+ "0x41": {
+ "0xd07", # Cortex A57
+ "0xd08", # Cortex A72
+ "0xd09", # Cortex A73
+ "0xd0a", # Cortex A75
+ "0xd0b", # Cortex A76
+ },
+ }
+
+ # Values of CPU Implementer and CPU part number are exposed by cpuinfo.
+ # Format:
+ # =================
+ # processor : 0
+ # model name : ARMv8 Processor rev 4 (v8l)
+ # BogoMIPS : 48.00
+ # Features : half thumb fastmult vfp edsp neon vfpv3 tls vfpv4
+ # CPU implementer : 0x41
+ # CPU architecture: 8
+ # CPU variant : 0x0
+ # CPU part : 0xd03
+ # CPU revision : 4
+
+ _, cpuinfo, _ = self.RunCommandOnDut("cat /proc/cpuinfo")
+
+ # List of all CPU cores: 0, 1, ..
+ proc_matches = re.findall(
+ r"^processor\s*: (\d+)$", cpuinfo, re.MULTILINE
+ )
+ # List of all corresponding CPU implementers
+ impl_matches = re.findall(
+ r"^CPU implementer\s*: (0x[\da-f]+)$", cpuinfo, re.MULTILINE
+ )
+ # List of all corresponding CPU part numbers
+ part_matches = re.findall(
+ r"^CPU part\s*: (0x[\da-f]+)$", cpuinfo, re.MULTILINE
+ )
+ assert len(proc_matches) == len(impl_matches)
+ assert len(part_matches) == len(impl_matches)
+
+ all_cores = set(proc_matches)
+ dut_big_cores = {
+ core
+ for core, impl, part in zip(
+ proc_matches, impl_matches, part_matches
+ )
+ if impl in BIG_CORES and part in BIG_CORES[impl]
+ }
+ dut_lit_cores = {
+ core
+ for core, impl, part in zip(
+ proc_matches, impl_matches, part_matches
+ )
+ if impl in LITTLE_CORES and part in LITTLE_CORES[impl]
+ }
+
+ if self.dut_config["cpu_usage"] == "big_only":
+ cores_to_enable = dut_big_cores
+ cores_to_disable = all_cores - dut_big_cores
+ elif self.dut_config["cpu_usage"] == "little_only":
+ cores_to_enable = dut_lit_cores
+ cores_to_disable = all_cores - dut_lit_cores
+ else:
+ self.logger.LogError(
+ "cpu_usage=%s is not supported on ARM.\n"
+ "Ignore ARM CPU setup and continue."
+ % self.dut_config["cpu_usage"]
+ )
+ return
+
+ if cores_to_enable:
+ cmd_enable_cores = (
+ "echo 1 | tee /sys/devices/system/cpu/cpu{%s}/online"
+ % ",".join(sorted(cores_to_enable))
+ )
+
+ cmd_disable_cores = ""
+ if cores_to_disable:
+ cmd_disable_cores = (
+ "echo 0 | tee /sys/devices/system/cpu/cpu{%s}/online"
+ % ",".join(sorted(cores_to_disable))
+ )
+
+ self.RunCommandOnDut(
+ "; ".join([cmd_enable_cores, cmd_disable_cores])
+ )
+ else:
+ # If there are no cores enabled by dut_config then configuration
+ # is invalid for current platform and should be ignored.
+ self.logger.LogError(
+ '"cpu_usage" is invalid for targeted platform.\n'
+ "dut_config[cpu_usage]=%s\n"
+ "dut big cores: %s\n"
+ "dut little cores: %s\n"
+ "Ignore ARM CPU setup and continue."
+ % (self.dut_config["cpu_usage"], dut_big_cores, dut_lit_cores)
+ )
+
+ def GetCpuOnline(self):
+ """Get online status of CPU cores.
+
+ Return dict of {int(cpu_num): <0|1>}.
+ """
+ get_cpu_online_cmd = (
+ 'paste -d" "'
+ " <(ls /sys/devices/system/cpu/cpu*/online)"
+ " <(cat /sys/devices/system/cpu/cpu*/online)"
+ )
+ _, online_output_str, _ = self.RunCommandOnDut(get_cpu_online_cmd)
+
+ # Here is the output we expect to see:
+ # -----------------
+ # /sys/devices/system/cpu/cpu0/online 0
+ # /sys/devices/system/cpu/cpu1/online 1
+
+ cpu_online = {}
+ cpu_online_match = re.compile(r"^[/\S]+/cpu(\d+)/[/\S]+\s+(\d+)$")
+ for line in online_output_str.splitlines():
+ match = cpu_online_match.match(line)
+ if match:
+ cpu = int(match.group(1))
+ status = int(match.group(2))
+ cpu_online[cpu] = status
+ # At least one CPU has to be online.
+ assert cpu_online
+
+ return cpu_online
+
+ def SetupCpuFreq(self, online_cores):
+ """Setup CPU frequency.
+
+ Based on self.dut_config['cpu_freq_pct'], set the frequency of online CPU
+ cores to the largest supported value that does not exceed
+ (cpu_freq_pct * max_freq / 100), bounded below by min_freq.
+
+ NOTE: scaling_available_frequencies support is required.
+ Otherwise the function has no effect.
+ """
+ freq_percent = self.dut_config["cpu_freq_pct"]
+ list_all_avail_freq_cmd = (
+ "ls /sys/devices/system/cpu/cpu{%s}/cpufreq/"
+ "scaling_available_frequencies"
+ )
+ # Ignore error to support general usage of frequency setup.
+ # Not all platforms support scaling_available_frequencies.
+ ret, all_avail_freq_str, _ = self.RunCommandOnDut(
+ list_all_avail_freq_cmd
+ % ",".join(str(core) for core in online_cores),
+ ignore_status=True,
+ )
+ if ret or not all_avail_freq_str:
+ # No scalable frequencies available for the core.
+ return ret
+ for avail_freq_path in all_avail_freq_str.split():
+ # Get available freq from every scaling_available_frequencies path.
+ # Error is considered fatal in self.RunCommandOnDut().
+ _, avail_freq_str, _ = self.RunCommandOnDut(
+ "cat " + avail_freq_path
+ )
+ assert avail_freq_str
+
+ all_avail_freq = sorted(
+ int(freq_str) for freq_str in avail_freq_str.split()
+ )
+ min_freq = all_avail_freq[0]
+ max_freq = all_avail_freq[-1]
+ # Calculate the frequency we are targeting.
+ target_freq = round(max_freq * freq_percent / 100)
+ # The target is most likely not in the list of supported frequencies,
+ # so the goal is to find the largest one that does not exceed it.
+ # Default to the minimum and try to maximize from there.
+ avail_ngt_target = min_freq
+ # Find the largest not greater than the target.
+ for next_largest in reversed(all_avail_freq):
+ if next_largest <= target_freq:
+ avail_ngt_target = next_largest
+ break
+
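+ # Illustrative example (hypothetical values): with
+ # all_avail_freq = [1000, 1500, 2000] and freq_percent = 60,
+ # target_freq = 1200 and the selected value is 1000, the largest
+ # available frequency not greater than the target.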
+ max_freq_path = avail_freq_path.replace(
+ "scaling_available_frequencies", "scaling_max_freq"
+ )
+ min_freq_path = avail_freq_path.replace(
+ "scaling_available_frequencies", "scaling_min_freq"
+ )
+ # With default ignore_status=False we expect 0 status or Fatal error.
+ self.RunCommandOnDut(
+ "echo %s | tee %s %s"
+ % (avail_ngt_target, max_freq_path, min_freq_path)
+ )
+
+ def WaitCooldown(self):
+ """Wait for DUT to cool down to certain temperature."""
+ waittime = 0
+ timeout_in_sec = int(self.dut_config["cooldown_time"]) * 60
+ # Temperature from sensors come in uCelsius units.
+ temp_in_ucels = int(self.dut_config["cooldown_temp"]) * 1000
+ sleep_interval = 30
+
+ # Wait until any of two events occurs:
+ # 1. CPU cools down to a specified temperature.
+ # 2. Timeout cooldown_time expires.
+ # If the target temperature is not reached within the specified timeout,
+ # the benchmark starts with a higher initial CPU temperature.
+ # In the worst case this may affect test results, but it guarantees an
+ # upper bound on the waiting time.
+ # TODO(denik): Report (or highlight) "high" CPU temperature in test results.
+ # "high" should be calculated based on empirical data per platform.
+ # Based on such reports we can adjust CPU configuration or
+ # cooldown limits accordingly.
+ while waittime < timeout_in_sec:
+ _, temp_output, _ = self.RunCommandOnDut(
+ "cat /sys/class/thermal/thermal_zone*/temp", ignore_status=True
+ )
+ if any(int(temp) > temp_in_ucels for temp in temp_output.split()):
+ time.sleep(sleep_interval)
+ waittime += sleep_interval
+ else:
+ # Exit the loop when:
+ # 1. Reported temp numbers from all thermal sensors do not exceed
+ # 'cooldown_temp' or
+ # 2. No data from the sensors.
+ break
+
+ self.logger.LogOutput("Cooldown wait time: %.1f min" % (waittime / 60))
+ return waittime
+
+ def DecreaseWaitTime(self):
+ """Change the ten seconds wait time for pagecycler to two seconds."""
+ FILE = (
+ "/usr/local/telemetry/src/tools/perf/page_sets/page_cycler_story.py"
+ )
+ ret = self.RunCommandOnDut("ls " + FILE)
+
+ if not ret:
+ sed_command = 'sed -i "s/_TTI_WAIT_TIME = 10/_TTI_WAIT_TIME = 2/g" '
+ self.RunCommandOnDut(sed_command + FILE)
+
+ def StopUI(self):
+ """Stop UI on DUT."""
+ # Added "ignore_status" for the case when crosperf stops ui service which
+ # was already stopped. Command is going to fail with 1.
+ self.RunCommandOnDut("stop ui", ignore_status=True)
+
+ def StartUI(self):
+ """Start UI on DUT."""
+ # Similar to StopUI, `start ui` fails if the service is already started.
+ self.RunCommandOnDut("start ui", ignore_status=True)
+
+ def KerncmdUpdateNeeded(self, intel_pstate):
+ """Check whether kernel cmdline update is needed.
+
+ Args:
+ intel_pstate: kernel command line argument (active, passive, no_hwp)
+
+ Returns:
+ True if update is needed.
+ """
+
+ good = 0
+
+ # Check that dut platform supports hwp
+ cmd = "grep -q '^flags.*hwp' /proc/cpuinfo"
+ ret_code, _, _ = self.RunCommandOnDut(cmd, ignore_status=True)
+ if ret_code != good:
+ # Intel hwp is not supported, update is not needed.
+ return False
+
+ kern_cmdline_cmd = (
+ 'grep -q "intel_pstate=%s" /proc/cmdline' % intel_pstate
+ )
+ ret_code, _, _ = self.RunCommandOnDut(
+ kern_cmdline_cmd, ignore_status=True
+ )
+ self.logger.LogOutput("grep /proc/cmdline returned %d" % ret_code)
+ if (
+ intel_pstate
+ and ret_code == good
+ or not intel_pstate
+ and ret_code != good
+ ):
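+ # `and` binds tighter than `or`, so this reads:
+ # (intel_pstate requested and already set) or
+ # (intel_pstate not requested and absent from cmdline).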
+ # No need to update cmdline if:
+ # 1. We are setting intel_pstate and we found it is already set.
+ # 2. Not using intel_pstate and it is not in cmdline.
+ return False
+
+ # Otherwise we need to update intel_pstate.
+ return True
+
+ def UpdateKerncmdIntelPstate(self, intel_pstate):
+ """Update kernel command line.
+
+ Args:
+ intel_pstate: kernel command line argument (active, passive, no_hwp)
+ """
+
+ good = 0
+
+ # First phase is to remove rootfs verification to allow cmdline change.
+ remove_verif_cmd = " ".join(
+ [
+ "/usr/share/vboot/bin/make_dev_ssd.sh",
+ "--remove_rootfs_verification",
+ "--partition %d",
+ ]
+ )
+ # Command for partition 2.
+ verif_part2_failed, _, _ = self.RunCommandOnDut(
+ remove_verif_cmd % 2, ignore_status=True
+ )
+ # Command for partition 4
+ # Some machines in the lab use partition 4 to boot from,
+ # so cmdline should be updated for both partitions.
+ verif_part4_failed, _, _ = self.RunCommandOnDut(
+ remove_verif_cmd % 4, ignore_status=True
+ )
+ if verif_part2_failed or verif_part4_failed:
+ self.logger.LogFatal(
+ "ERROR. Failed to update kernel cmdline on partition %d.\n"
+ "Remove verification failed with status %d"
+ % (
+ 2 if verif_part2_failed else 4,
+ verif_part2_failed or verif_part4_failed,
+ )
+ )
+
+ self.RunCommandOnDut("reboot && exit")
+ # Give enough time for dut to complete reboot
+ # TODO(denik): Replace with the function checking machine availability.
+ time.sleep(30)
+
+ # Second phase to update intel_pstate in kernel cmdline.
+ kern_cmdline = "\n".join(
+ [
+ "tmpfile=$(mktemp)",
+ "partnumb=%d",
+ "pstate=%s",
+ # Store kernel cmdline in a temp file.
+ "/usr/share/vboot/bin/make_dev_ssd.sh --partition ${partnumb}"
+ " --save_config ${tmpfile}",
+ # Remove intel_pstate argument if present.
+ "sed -i -r 's/ intel_pstate=[A-Za-z_]+//g' ${tmpfile}.${partnumb}",
+ # Insert intel_pstate with a new value if it is set.
+ "[[ -n ${pstate} ]] &&"
+ ' sed -i -e "s/ *$/ intel_pstate=${pstate}/" ${tmpfile}.${partnumb}',
+ # Save the change in kernel cmdline.
+ # After completion we have to reboot.
+ "/usr/share/vboot/bin/make_dev_ssd.sh --partition ${partnumb}"
+ " --set_config ${tmpfile}",
+ ]
+ )
+ kern_part2_cmdline_cmd = kern_cmdline % (2, intel_pstate)
+ self.logger.LogOutput(
+ "Command to change kernel command line: %s" % kern_part2_cmdline_cmd
+ )
+ upd_part2_failed, _, _ = self.RunCommandOnDut(
+ kern_part2_cmdline_cmd, ignore_status=True
+ )
+ # Again here we are updating cmdline for partition 4
+ # in addition to partition 2. Without this some machines
+ # in the lab might fail.
+ kern_part4_cmdline_cmd = kern_cmdline % (4, intel_pstate)
+ self.logger.LogOutput(
+ "Command to change kernel command line: %s" % kern_part4_cmdline_cmd
+ )
+ upd_part4_failed, _, _ = self.RunCommandOnDut(
+ kern_part4_cmdline_cmd, ignore_status=True
+ )
+ if upd_part2_failed or upd_part4_failed:
+ self.logger.LogFatal(
+ "ERROR. Failed to update kernel cmdline on partition %d.\n"
+ "intel_pstate update failed with status %d"
+ % (
+ 2 if upd_part2_failed else 4,
+ upd_part2_failed or upd_part4_failed,
+ )
+ )
+
+ self.RunCommandOnDut("reboot && exit")
+ # Wait 30s after reboot.
+ time.sleep(30)
+
+ # Verification phase.
+ # Check that cmdline was updated.
+ # Throw an exception if not.
+ kern_cmdline_cmd = (
+ 'grep -q "intel_pstate=%s" /proc/cmdline' % intel_pstate
+ )
+ ret_code, _, _ = self.RunCommandOnDut(
+ kern_cmdline_cmd, ignore_status=True
+ )
+ if (
+ intel_pstate
+ and ret_code != good
+ or not intel_pstate
+ and ret_code == good
+ ):
+ # Kernel cmdline doesn't match input intel_pstate.
+ self.logger.LogFatal(
+ "ERROR. Failed to update kernel cmdline. "
+ "Final verification failed with status %d" % ret_code
+ )
+
+ self.logger.LogOutput("Kernel cmdline updated successfully.")
+
+ @contextmanager
+ def PauseUI(self):
+ """Stop UI before and Start UI after the context block.
+
+ The context manager makes sure the UI is always resumed at the end.
+ """
+ self.StopUI()
+ try:
+ yield
+
+ finally:
+ self.StartUI()
+
+ def SetupDevice(self):
+ """Setup device to get it ready for testing.
+
+ Returns:
+ Wait time of cool down for this benchmark run.
+ """
+ self.logger.LogOutput("Update kernel cmdline if necessary and reboot")
+ intel_pstate = self.dut_config["intel_pstate"]
+ if intel_pstate and self.KerncmdUpdateNeeded(intel_pstate):
+ self.UpdateKerncmdIntelPstate(intel_pstate)
+
+ wait_time = 0
+ # Pause UI while configuring the DUT.
+ # This accelerates setup (waiting for cooldown drops by roughly 10x)
+ # and helps reset Chrome state left over from the previous test.
+ with self.PauseUI():
+ # Unless the user turns on ASLR via the flag, we first disable ASLR
+ # before running the benchmarks.
+ if not self.dut_config["enable_aslr"]:
+ self.DisableASLR()
+
+ # CPU usage setup comes first where we enable/disable cores.
+ self.SetupCpuUsage()
+ cpu_online_status = self.GetCpuOnline()
+ # List of online cores of type int (core number).
+ online_cores = [
+ core for core, status in cpu_online_status.items() if status
+ ]
+ if self.dut_config["cooldown_time"]:
+ # Set up a power-conservative mode for effective cooldown.
+ # Ignore the status since powersave may not be available
+ # on all platforms, and we handle that case below.
+ ret = self.SetCpuGovernor("powersave", ignore_status=True)
+ if ret:
+ # "powersave" is not available, use "ondemand".
+ # Still not a fatal error if it fails.
+ ret = self.SetCpuGovernor("ondemand", ignore_status=True)
+ # TODO(denik): Run comparison test for 'powersave' and 'ondemand'
+ # on scarlet and kevin64.
+ # We might have to consider reducing freq manually to the min
+ # if it helps to reduce waiting time.
+ wait_time = self.WaitCooldown()
+
+ # Setup CPU governor for the benchmark run.
+ # It overwrites the previous governor settings.
+ governor = self.dut_config["governor"]
+ # FIXME(denik): Pass online cores to governor setup.
+ self.SetCpuGovernor(governor)
+
+ # Disabling Turbo and setting up CPU freq should ALWAYS follow governor
+ # setup, since the governor may change:
+ # - frequency;
+ # - turbo/boost.
+ self.DisableTurbo()
+ self.SetupCpuFreq(online_cores)
+
+ self.DecreaseWaitTime()
+ # FIXME(denik): Currently we are not restoring the previous cpufreq
+ # settings since we reboot and redo the setup every time anyway.
+ # But that may change in the future, and then we will have to restore
+ # the settings.
+ return wait_time
diff --git a/cros_utils/device_setup_utils_unittest.py b/cros_utils/device_setup_utils_unittest.py
index 12a70811..d7339e25 100755
--- a/cros_utils/device_setup_utils_unittest.py
+++ b/cros_utils/device_setup_utils_unittest.py
@@ -1,16 +1,14 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
-# Copyright 2019 The Chromium OS Authors. All rights reserved.
+# Copyright 2019 The ChromiumOS Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Unittest for device_setup_utils."""
-from __future__ import print_function
import time
-
import unittest
from unittest import mock
@@ -18,6 +16,7 @@ from cros_utils import command_executer
from cros_utils import logger
from cros_utils.device_setup_utils import DutWrapper
+
BIG_LITTLE_CPUINFO = """processor : 0
model name : ARMv8 Processor rev 4 (v8l)
BogoMIPS : 48.00
@@ -94,609 +93,713 @@ Serial : 0000000000000000
class DutWrapperTest(unittest.TestCase):
- """Class of DutWrapper test."""
- real_logger = logger.GetLogger()
-
- mock_cmd_exec = mock.Mock(spec=command_executer.CommandExecuter)
- mock_logger = mock.Mock(spec=logger.Logger)
-
- def __init__(self, *args, **kwargs):
- super(DutWrapperTest, self).__init__(*args, **kwargs)
-
- def setUp(self):
- self.dw = DutWrapper(
- '/tmp/chromeos',
- 'lumpy.cros2',
- log_level='verbose',
- logger=self.mock_logger,
- ce=self.mock_cmd_exec,
- dut_config={})
-
- @mock.patch.object(command_executer.CommandExecuter, 'CrosRunCommandWOutput')
- def test_run_command_on_dut(self, mock_cros_runcmd):
- self.mock_cmd_exec.CrosRunCommandWOutput = mock_cros_runcmd
- mock_cros_runcmd.return_value = (0, '', '')
- mock_cros_runcmd.assert_not_called()
- self.dw.RunCommandOnDut('run command;')
- mock_cros_runcmd.assert_called_once_with(
- 'run command;', chromeos_root='/tmp/chromeos', machine='lumpy.cros2')
-
- @mock.patch.object(command_executer.CommandExecuter, 'CrosRunCommandWOutput')
- def test_dut_wrapper_fatal_error(self, mock_cros_runcmd):
- self.mock_cmd_exec.CrosRunCommandWOutput = mock_cros_runcmd
- # Command returns error 1.
- mock_cros_runcmd.return_value = (1, '', 'Error!')
- mock_cros_runcmd.assert_not_called()
- self.dw.RunCommandOnDut('run command;')
- mock_cros_runcmd.assert_called_once_with(
- 'run command;', chromeos_root='/tmp/chromeos', machine='lumpy.cros2')
- # Error status causes log fatal.
- self.assertEqual(
- self.mock_logger.method_calls[-1],
- mock.call.LogFatal('Command execution on DUT lumpy.cros2 failed.\n'
- 'Failing command: run command;\nreturned 1\n'
- 'Error message: Error!'))
-
- @mock.patch.object(command_executer.CommandExecuter, 'CrosRunCommandWOutput')
- def test_dut_wrapper_ignore_error(self, mock_cros_runcmd):
- self.mock_cmd_exec.CrosRunCommandWOutput = mock_cros_runcmd
- # Command returns error 1.
- mock_cros_runcmd.return_value = (1, '', 'Error!')
- self.dw.RunCommandOnDut('run command;', ignore_status=True)
- mock_cros_runcmd.assert_called_once_with(
- 'run command;', chromeos_root='/tmp/chromeos', machine='lumpy.cros2')
- # Error status is not fatal. LogError records the error message.
- self.assertEqual(
- self.mock_logger.method_calls[-1],
- mock.call.LogError('Command execution on DUT lumpy.cros2 failed.\n'
- 'Failing command: run command;\nreturned 1\n'
- 'Error message: Error!\n'
- '(Failure is considered non-fatal. Continue.)'))
-
- def test_disable_aslr(self):
- self.dw.RunCommandOnDut = mock.Mock(return_value=(0, '', ''))
- self.dw.DisableASLR()
- # pyformat: disable
- set_cpu_cmd = ('set -e; '
- 'if [[ -e /proc/sys/kernel/randomize_va_space ]]; then '
- ' echo 0 > /proc/sys/kernel/randomize_va_space; '
- 'fi')
- self.dw.RunCommandOnDut.assert_called_once_with(set_cpu_cmd)
-
- def test_set_cpu_governor(self):
- self.dw.RunCommandOnDut = mock.Mock(return_value=(0, '', ''))
- self.dw.SetCpuGovernor('new_governor', ignore_status=False)
- set_cpu_cmd = (
- 'for f in `ls -d /sys/devices/system/cpu/cpu*/cpufreq 2>/dev/null`; do '
- # Skip writing scaling_governor if cpu is offline.
- ' [[ -e ${f/cpufreq/online} ]] && grep -q 0 ${f/cpufreq/online} '
- ' && continue; '
- ' cd $f; '
- ' if [[ -e scaling_governor ]]; then '
- ' echo %s > scaling_governor; fi; '
- 'done; ')
- self.dw.RunCommandOnDut.assert_called_once_with(
- set_cpu_cmd % 'new_governor', ignore_status=False)
-
- def test_set_cpu_governor_propagate_error(self):
- self.dw.RunCommandOnDut = mock.Mock(return_value=(1, '', 'Error.'))
- self.dw.SetCpuGovernor('non-exist_governor')
- set_cpu_cmd = (
- 'for f in `ls -d /sys/devices/system/cpu/cpu*/cpufreq 2>/dev/null`; do '
- # Skip writing scaling_governor if cpu is not online.
- ' [[ -e ${f/cpufreq/online} ]] && grep -q 0 ${f/cpufreq/online} '
- ' && continue; '
- ' cd $f; '
- ' if [[ -e scaling_governor ]]; then '
- ' echo %s > scaling_governor; fi; '
- 'done; ')
- # By default error status is fatal.
- self.dw.RunCommandOnDut.assert_called_once_with(
- set_cpu_cmd % 'non-exist_governor', ignore_status=False)
-
- def test_set_cpu_governor_ignore_status(self):
- self.dw.RunCommandOnDut = mock.Mock(return_value=(1, '', 'Error.'))
- ret_code = self.dw.SetCpuGovernor('non-exist_governor', ignore_status=True)
- set_cpu_cmd = (
- 'for f in `ls -d /sys/devices/system/cpu/cpu*/cpufreq 2>/dev/null`; do '
- # Skip writing scaling_governor if cpu is not online.
- ' [[ -e ${f/cpufreq/online} ]] && grep -q 0 ${f/cpufreq/online} '
- ' && continue; '
- ' cd $f; '
- ' if [[ -e scaling_governor ]]; then '
- ' echo %s > scaling_governor; fi; '
- 'done; ')
- self.dw.RunCommandOnDut.assert_called_once_with(
- set_cpu_cmd % 'non-exist_governor', ignore_status=True)
- self.assertEqual(ret_code, 1)
-
- def test_disable_turbo(self):
- self.dw.RunCommandOnDut = mock.Mock(return_value=(0, '', ''))
- self.dw.DisableTurbo()
- set_cpu_cmd = (
- # Disable Turbo in Intel pstate driver
- 'if [[ -e /sys/devices/system/cpu/intel_pstate/no_turbo ]]; then '
- ' if grep -q 0 /sys/devices/system/cpu/intel_pstate/no_turbo; then '
- ' echo -n 1 > /sys/devices/system/cpu/intel_pstate/no_turbo; '
- ' fi; '
- 'fi; ')
- self.dw.RunCommandOnDut.assert_called_once_with(set_cpu_cmd)
-
- def test_get_cpu_online_two(self):
- """Test one digit CPU #."""
- self.dw.RunCommandOnDut = mock.Mock(
- return_value=(0, '/sys/devices/system/cpu/cpu0/online 0\n'
- '/sys/devices/system/cpu/cpu1/online 1\n', ''))
- cpu_online = self.dw.GetCpuOnline()
- self.assertEqual(cpu_online, {0: 0, 1: 1})
-
- def test_get_cpu_online_twelve(self):
- """Test two digit CPU #."""
- self.dw.RunCommandOnDut = mock.Mock(
- return_value=(0, '/sys/devices/system/cpu/cpu0/online 1\n'
- '/sys/devices/system/cpu/cpu1/online 0\n'
- '/sys/devices/system/cpu/cpu10/online 1\n'
- '/sys/devices/system/cpu/cpu11/online 1\n'
- '/sys/devices/system/cpu/cpu2/online 1\n'
- '/sys/devices/system/cpu/cpu3/online 0\n'
- '/sys/devices/system/cpu/cpu4/online 1\n'
- '/sys/devices/system/cpu/cpu5/online 0\n'
- '/sys/devices/system/cpu/cpu6/online 1\n'
- '/sys/devices/system/cpu/cpu7/online 0\n'
- '/sys/devices/system/cpu/cpu8/online 1\n'
- '/sys/devices/system/cpu/cpu9/online 0\n', ''))
- cpu_online = self.dw.GetCpuOnline()
- self.assertEqual(cpu_online, {
- 0: 1,
- 1: 0,
- 2: 1,
- 3: 0,
- 4: 1,
- 5: 0,
- 6: 1,
- 7: 0,
- 8: 1,
- 9: 0,
- 10: 1,
- 11: 1
- })
-
- def test_get_cpu_online_no_output(self):
- """Test error case, no output."""
- self.dw.RunCommandOnDut = mock.Mock(return_value=(0, '', ''))
- with self.assertRaises(AssertionError):
- self.dw.GetCpuOnline()
-
- def test_get_cpu_online_command_error(self):
- """Test error case, command error."""
- self.dw.RunCommandOnDut = mock.Mock(side_effect=AssertionError)
- with self.assertRaises(AssertionError):
- self.dw.GetCpuOnline()
-
- @mock.patch.object(DutWrapper, 'SetupArmCores')
- def test_setup_cpu_usage_little_on_arm(self, mock_setup_arm):
- self.dw.SetupArmCores = mock_setup_arm
- self.dw.RunCommandOnDut = mock.Mock(return_value=(0, 'armv7l', ''))
- self.dw.dut_config['cpu_usage'] = 'little_only'
- self.dw.SetupCpuUsage()
- self.dw.SetupArmCores.assert_called_once_with()
-
- @mock.patch.object(DutWrapper, 'SetupArmCores')
- def test_setup_cpu_usage_big_on_aarch64(self, mock_setup_arm):
- self.dw.SetupArmCores = mock_setup_arm
- self.dw.RunCommandOnDut = mock.Mock(return_value=(0, 'aarch64', ''))
- self.dw.dut_config['cpu_usage'] = 'big_only'
- self.dw.SetupCpuUsage()
- self.dw.SetupArmCores.assert_called_once_with()
-
- @mock.patch.object(DutWrapper, 'SetupArmCores')
- def test_setup_cpu_usage_big_on_intel(self, mock_setup_arm):
- self.dw.SetupArmCores = mock_setup_arm
- self.dw.RunCommandOnDut = mock.Mock(return_value=(0, 'x86_64', ''))
- self.dw.dut_config['cpu_usage'] = 'big_only'
- self.dw.SetupCpuUsage()
- # Check that SetupArmCores not called with invalid setup.
- self.dw.SetupArmCores.assert_not_called()
-
- @mock.patch.object(DutWrapper, 'SetupArmCores')
- def test_setup_cpu_usage_all_on_intel(self, mock_setup_arm):
- self.dw.SetupArmCores = mock_setup_arm
- self.dw.RunCommandOnDut = mock.Mock(return_value=(0, 'x86_64', ''))
- self.dw.dut_config['cpu_usage'] = 'all'
- self.dw.SetupCpuUsage()
- # Check that SetupArmCores not called in general case.
- self.dw.SetupArmCores.assert_not_called()
-
- def test_setup_arm_cores_big_on_big_little(self):
- self.dw.RunCommandOnDut = mock.Mock(side_effect=[
- (0, BIG_LITTLE_CPUINFO, ''),
- (0, '', ''),
- ])
- self.dw.dut_config['cpu_usage'] = 'big_only'
- self.dw.SetupArmCores()
- self.dw.RunCommandOnDut.assert_called_with(
- 'echo 1 | tee /sys/devices/system/cpu/cpu{2}/online; '
- 'echo 0 | tee /sys/devices/system/cpu/cpu{0,1}/online')
-
- def test_setup_arm_cores_little_on_big_little(self):
- self.dw.RunCommandOnDut = mock.Mock(side_effect=[
- (0, BIG_LITTLE_CPUINFO, ''),
- (0, '', ''),
- ])
- self.dw.dut_config['cpu_usage'] = 'little_only'
- self.dw.SetupArmCores()
- self.dw.RunCommandOnDut.assert_called_with(
- 'echo 1 | tee /sys/devices/system/cpu/cpu{0,1}/online; '
- 'echo 0 | tee /sys/devices/system/cpu/cpu{2}/online')
-
- def test_setup_arm_cores_invalid_config(self):
- self.dw.RunCommandOnDut = mock.Mock(side_effect=[
- (0, LITTLE_ONLY_CPUINFO, ''),
- (0, '', ''),
- ])
- self.dw.dut_config['cpu_usage'] = 'big_only'
- self.dw.SetupArmCores()
- # Check that setup command is not sent when trying
- # to use 'big_only' on a platform with all little cores.
- self.dw.RunCommandOnDut.assert_called_once_with('cat /proc/cpuinfo')
-
- def test_setup_arm_cores_not_big_little(self):
- self.dw.RunCommandOnDut = mock.Mock(side_effect=[
- (0, NOT_BIG_LITTLE_CPUINFO, ''),
- (0, '', ''),
- ])
- self.dw.dut_config['cpu_usage'] = 'big_only'
- self.dw.SetupArmCores()
- # Check that setup command is not sent when trying
- # to use 'big_only' on a platform w/o support of big/little.
- self.dw.RunCommandOnDut.assert_called_once_with('cat /proc/cpuinfo')
-
- def test_setup_arm_cores_unsupported_cpu_usage(self):
- self.dw.RunCommandOnDut = mock.Mock(side_effect=[
- (0, BIG_LITTLE_CPUINFO, ''),
- (0, '', ''),
- ])
- self.dw.dut_config['cpu_usage'] = 'exclusive_cores'
- self.dw.SetupArmCores()
- # Check that setup command is not sent when trying to use
- # 'exclusive_cores' on ARM CPU setup.
- self.dw.RunCommandOnDut.assert_called_once_with('cat /proc/cpuinfo')
-
- def test_setup_cpu_freq_single_full(self):
- online = [0]
- self.dw.RunCommandOnDut = mock.Mock(side_effect=[
- (0,
- '/sys/devices/system/cpu/cpu0/cpufreq/scaling_available_frequencies\n',
- ''),
- (0, '1 2 3 4 5 6 7 8 9 10', ''),
- (0, '', ''),
- ])
- self.dw.dut_config['cpu_freq_pct'] = 100
- self.dw.SetupCpuFreq(online)
- self.assertGreaterEqual(self.dw.RunCommandOnDut.call_count, 3)
- self.assertEqual(
- self.dw.RunCommandOnDut.call_args,
- mock.call('echo 10 | tee '
- '/sys/devices/system/cpu/cpu0/cpufreq/scaling_max_freq '
- '/sys/devices/system/cpu/cpu0/cpufreq/scaling_min_freq'))
-
- def test_setup_cpu_freq_middle(self):
- online = [0]
- self.dw.RunCommandOnDut = mock.Mock(side_effect=[
- (0,
- '/sys/devices/system/cpu/cpu0/cpufreq/scaling_available_frequencies\n',
- ''),
- (0, '1 2 3 4 5 6 7 8 9 10', ''),
- (0, '', ''),
- ])
- self.dw.dut_config['cpu_freq_pct'] = 60
- self.dw.SetupCpuFreq(online)
- self.assertGreaterEqual(self.dw.RunCommandOnDut.call_count, 2)
- self.assertEqual(
- self.dw.RunCommandOnDut.call_args,
- mock.call('echo 6 | tee '
- '/sys/devices/system/cpu/cpu0/cpufreq/scaling_max_freq '
- '/sys/devices/system/cpu/cpu0/cpufreq/scaling_min_freq'))
-
- def test_setup_cpu_freq_lowest(self):
- online = [0]
- self.dw.RunCommandOnDut = mock.Mock(side_effect=[
- (0,
- '/sys/devices/system/cpu/cpu0/cpufreq/scaling_available_frequencies\n',
- ''),
- (0, '1 2 3 4 5 6 7 8 9 10', ''),
- (0, '', ''),
- ])
- self.dw.dut_config['cpu_freq_pct'] = 0
- self.dw.SetupCpuFreq(online)
- self.assertGreaterEqual(self.dw.RunCommandOnDut.call_count, 2)
- self.assertEqual(
- self.dw.RunCommandOnDut.call_args,
- mock.call('echo 1 | tee '
- '/sys/devices/system/cpu/cpu0/cpufreq/scaling_max_freq '
- '/sys/devices/system/cpu/cpu0/cpufreq/scaling_min_freq'))
-
- def test_setup_cpu_freq_multiple_middle(self):
- online = [0, 1]
- self.dw.RunCommandOnDut = mock.Mock(side_effect=[
- (0,
- '/sys/devices/system/cpu/cpu0/cpufreq/scaling_available_frequencies\n'
- '/sys/devices/system/cpu/cpu1/cpufreq/scaling_available_frequencies\n',
- ''),
- (0, '1 2 3 4 5 6 7 8 9 10', ''),
- (0, '', ''),
- (0, '1 4 6 8 10 12 14 16 18 20', ''),
- (0, '', ''),
- ])
- self.dw.dut_config['cpu_freq_pct'] = 70
- self.dw.SetupCpuFreq(online)
- self.assertEqual(self.dw.RunCommandOnDut.call_count, 5)
- self.assertEqual(
- self.dw.RunCommandOnDut.call_args_list[2],
- mock.call('echo 7 | tee '
- '/sys/devices/system/cpu/cpu0/cpufreq/scaling_max_freq '
- '/sys/devices/system/cpu/cpu0/cpufreq/scaling_min_freq'))
- self.assertEqual(
- self.dw.RunCommandOnDut.call_args_list[4],
- mock.call('echo 14 | tee '
- '/sys/devices/system/cpu/cpu1/cpufreq/scaling_max_freq '
- '/sys/devices/system/cpu/cpu1/cpufreq/scaling_min_freq'))
-
- def test_setup_cpu_freq_no_scaling_available(self):
- online = [0, 1]
- self.dw.RunCommandOnDut = mock.Mock(
- return_value=(2, '', 'No such file or directory'))
- self.dw.dut_config['cpu_freq_pct'] = 50
- self.dw.SetupCpuFreq(online)
- self.dw.RunCommandOnDut.assert_called_once()
- self.assertNotRegex(self.dw.RunCommandOnDut.call_args_list[0][0][0],
- '^echo.*scaling_max_freq$')
-
- def test_setup_cpu_freq_multiple_no_access(self):
- online = [0, 1]
- self.dw.RunCommandOnDut = mock.Mock(side_effect=[
- (0,
- '/sys/devices/system/cpu/cpu0/cpufreq/scaling_available_frequencies\n'
- '/sys/devices/system/cpu/cpu1/cpufreq/scaling_available_frequencies\n',
- ''),
- (0, '1 4 6 8 10 12 14 16 18 20', ''),
- AssertionError(),
- ])
- self.dw.dut_config['cpu_freq_pct'] = 30
- # Error status causes log fatal.
- with self.assertRaises(AssertionError):
- self.dw.SetupCpuFreq(online)
-
- @mock.patch.object(time, 'sleep')
- def test_wait_cooldown_nowait(self, mock_sleep):
- mock_sleep.return_value = 0
- self.dw.RunCommandOnDut = mock.Mock(return_value=(0, '39000', ''))
- self.dw.dut_config['cooldown_time'] = 10
- self.dw.dut_config['cooldown_temp'] = 40
- wait_time = self.dw.WaitCooldown()
- # Send command to DUT only once to check temperature
- # and make sure it does not exceed the threshold.
- self.dw.RunCommandOnDut.assert_called_once()
- mock_sleep.assert_not_called()
- self.assertEqual(wait_time, 0)
-
- @mock.patch.object(time, 'sleep')
- def test_wait_cooldown_needwait_once(self, mock_sleep):
- """Wait one iteration for cooldown.
-
- Set large enough timeout and changing temperature
- output. Make sure it exits when expected value
- received.
- Expect that WaitCooldown check temp twice.
- """
- mock_sleep.return_value = 0
- self.dw.RunCommandOnDut = mock.Mock(side_effect=[(0, '41000',
- ''), (0, '39999', '')])
- self.dw.dut_config['cooldown_time'] = 100
- self.dw.dut_config['cooldown_temp'] = 40
- wait_time = self.dw.WaitCooldown()
- self.dw.RunCommandOnDut.assert_called()
- self.assertEqual(self.dw.RunCommandOnDut.call_count, 2)
- mock_sleep.assert_called()
- self.assertGreater(wait_time, 0)
-
- @mock.patch.object(time, 'sleep')
- def test_wait_cooldown_needwait(self, mock_sleep):
- """Test exit by timeout.
-
- Send command to DUT checking the temperature and
- check repeatedly until timeout goes off.
- Output from temperature sensor never changes.
- """
- mock_sleep.return_value = 0
- self.dw.RunCommandOnDut = mock.Mock(return_value=(0, '41000', ''))
- self.dw.dut_config['cooldown_time'] = 60
- self.dw.dut_config['cooldown_temp'] = 40
- wait_time = self.dw.WaitCooldown()
- self.dw.RunCommandOnDut.assert_called()
- self.assertGreater(self.dw.RunCommandOnDut.call_count, 2)
- mock_sleep.assert_called()
- self.assertGreater(wait_time, 0)
-
- @mock.patch.object(time, 'sleep')
- def test_wait_cooldown_needwait_multtemp(self, mock_sleep):
- """Wait until all temps go down.
-
- Set large enough timeout and changing temperature
- output. Make sure it exits when expected value
- for all temperatures received.
- Expect 3 checks.
- """
- mock_sleep.return_value = 0
- self.dw.RunCommandOnDut = mock.Mock(side_effect=[
- (0, '41000\n20000\n30000\n45000', ''),
- (0, '39000\n20000\n30000\n41000', ''),
- (0, '39000\n20000\n30000\n31000', ''),
- ])
- self.dw.dut_config['cooldown_time'] = 100
- self.dw.dut_config['cooldown_temp'] = 40
- wait_time = self.dw.WaitCooldown()
- self.dw.RunCommandOnDut.assert_called()
- self.assertEqual(self.dw.RunCommandOnDut.call_count, 3)
- mock_sleep.assert_called()
- self.assertGreater(wait_time, 0)
-
- @mock.patch.object(time, 'sleep')
- def test_wait_cooldown_thermal_error(self, mock_sleep):
- """Handle error status.
-
- Any error should be considered non-fatal.
- """
- mock_sleep.return_value = 0
- self.dw.RunCommandOnDut = mock.Mock(side_effect=[
- (1, '39000\n20000\n30000\n41000', 'Thermal error'),
- (1, '39000\n20000\n30000\n31000', 'Thermal error'),
- ])
- self.dw.dut_config['cooldown_time'] = 10
- self.dw.dut_config['cooldown_temp'] = 40
- wait_time = self.dw.WaitCooldown()
- # Check that errors are ignored.
- self.dw.RunCommandOnDut.assert_called_with(
- 'cat /sys/class/thermal/thermal_zone*/temp', ignore_status=True)
- self.assertEqual(self.dw.RunCommandOnDut.call_count, 2)
- # Check that we are waiting even when an error is returned
- # as soon as data is coming.
- mock_sleep.assert_called()
- self.assertGreater(wait_time, 0)
-
- @mock.patch.object(time, 'sleep')
- def test_wait_cooldown_thermal_no_output(self, mock_sleep):
- """Handle no output.
-
- Check handling of empty stdout.
- """
- mock_sleep.return_value = 0
- self.dw.RunCommandOnDut = mock.Mock(side_effect=[(1, '', 'Thermal error')])
- self.dw.dut_config['cooldown_time'] = 10
- self.dw.dut_config['cooldown_temp'] = 40
- wait_time = self.dw.WaitCooldown()
- # Check that errors are ignored.
- self.dw.RunCommandOnDut.assert_called_once_with(
- 'cat /sys/class/thermal/thermal_zone*/temp', ignore_status=True)
- # No wait.
- mock_sleep.assert_not_called()
- self.assertEqual(wait_time, 0)
-
- @mock.patch.object(time, 'sleep')
- def test_wait_cooldown_thermal_ws_output(self, mock_sleep):
- """Handle whitespace output.
-
- Check handling of whitespace only.
- """
- mock_sleep.return_value = 0
- self.dw.RunCommandOnDut = mock.Mock(side_effect=[(1, '\n',
- 'Thermal error')])
- self.dw.dut_config['cooldown_time'] = 10
- self.dw.dut_config['cooldown_temp'] = 40
- wait_time = self.dw.WaitCooldown()
- # Check that errors are ignored.
- self.dw.RunCommandOnDut.assert_called_once_with(
- 'cat /sys/class/thermal/thermal_zone*/temp', ignore_status=True)
- # No wait.
- mock_sleep.assert_not_called()
- self.assertEqual(wait_time, 0)
-
- def test_stop_ui(self):
- self.dw.RunCommandOnDut = mock.Mock(return_value=(0, '', ''))
- self.dw.StopUI()
- self.dw.RunCommandOnDut.assert_called_once_with(
- 'stop ui', ignore_status=True)
-
- def test_start_ui(self):
- self.dw.RunCommandOnDut = mock.Mock(return_value=(0, '', ''))
- self.dw.StartUI()
- self.dw.RunCommandOnDut.assert_called_once_with(
- 'start ui', ignore_status=True)
-
- def test_setup_device(self):
-
- def FakeRunner(command, ignore_status=False):
- # pylint fix for unused variable.
- del command, ignore_status
- return 0, '', ''
-
- def SetupMockFunctions():
- self.dw.RunCommandOnDut = mock.Mock(return_value=FakeRunner)
- self.dw.KerncmdUpdateNeeded = mock.Mock(return_value=True)
- self.dw.UpdateKerncmdIntelPstate = mock.Mock(return_value=0)
- self.dw.DisableASLR = mock.Mock(return_value=0)
- self.dw.SetupCpuUsage = mock.Mock(return_value=0)
- self.dw.SetupCpuFreq = mock.Mock(return_value=0)
- self.dw.GetCpuOnline = mock.Mock(return_value={0: 1, 1: 1, 2: 0})
- self.dw.SetCpuGovernor = mock.Mock(return_value=0)
- self.dw.DisableTurbo = mock.Mock(return_value=0)
- self.dw.StopUI = mock.Mock(return_value=0)
- self.dw.StartUI = mock.Mock(return_value=0)
- self.dw.WaitCooldown = mock.Mock(return_value=0)
- self.dw.DecreaseWaitTime = mock.Mock(return_value=0)
-
- self.dw.dut_config['enable_aslr'] = False
- self.dw.dut_config['cooldown_time'] = 0
- self.dw.dut_config['governor'] = 'fake_governor'
- self.dw.dut_config['cpu_freq_pct'] = 65
- self.dw.dut_config['intel_pstate'] = 'no_hwp'
-
- SetupMockFunctions()
- self.dw.SetupDevice()
-
- self.dw.KerncmdUpdateNeeded.assert_called_once()
- self.dw.UpdateKerncmdIntelPstate.assert_called_once()
- self.dw.DisableASLR.assert_called_once()
- self.dw.SetupCpuUsage.assert_called_once_with()
- self.dw.SetupCpuFreq.assert_called_once_with([0, 1])
- self.dw.GetCpuOnline.assert_called_once_with()
- self.dw.SetCpuGovernor.assert_called_once_with('fake_governor')
- self.dw.DisableTurbo.assert_called_once_with()
- self.dw.DecreaseWaitTime.assert_called_once_with()
- self.dw.StopUI.assert_called_once_with()
- self.dw.StartUI.assert_called_once_with()
- self.dw.WaitCooldown.assert_not_called()
-
- # Test SetupDevice with cooldown
- self.dw.dut_config['cooldown_time'] = 10
-
- SetupMockFunctions()
- self.dw.GetCpuOnline = mock.Mock(return_value={0: 0, 1: 1})
-
- self.dw.SetupDevice()
-
- self.dw.WaitCooldown.assert_called_once_with()
- self.dw.DisableASLR.assert_called_once()
- self.dw.DisableTurbo.assert_called_once_with()
- self.dw.SetupCpuUsage.assert_called_once_with()
- self.dw.SetupCpuFreq.assert_called_once_with([1])
- self.dw.SetCpuGovernor.assert_called()
- self.dw.GetCpuOnline.assert_called_once_with()
- self.dw.StopUI.assert_called_once_with()
- self.dw.StartUI.assert_called_once_with()
- self.assertGreater(self.dw.SetCpuGovernor.call_count, 1)
- self.assertEqual(self.dw.SetCpuGovernor.call_args,
- mock.call('fake_governor'))
-
- # Test SetupDevice with cooldown
- SetupMockFunctions()
- self.dw.SetupCpuUsage = mock.Mock(side_effect=RuntimeError())
-
- with self.assertRaises(RuntimeError):
- self.dw.SetupDevice()
-
- # This call injected an exception.
- self.dw.SetupCpuUsage.assert_called_once_with()
- # Calls following the expeption are skipped.
- self.dw.WaitCooldown.assert_not_called()
- self.dw.DisableTurbo.assert_not_called()
- self.dw.SetupCpuFreq.assert_not_called()
- self.dw.SetCpuGovernor.assert_not_called()
- self.dw.GetCpuOnline.assert_not_called()
- # Check that Stop/Start UI are always called.
- self.dw.StopUI.assert_called_once_with()
- self.dw.StartUI.assert_called_once_with()
-
-
-if __name__ == '__main__':
- unittest.main()
+ """Class of DutWrapper test."""
+
+ real_logger = logger.GetLogger()
+
+ mock_cmd_exec = mock.Mock(spec=command_executer.CommandExecuter)
+ mock_logger = mock.Mock(spec=logger.Logger)
+
+ def __init__(self, *args, **kwargs):
+ super(DutWrapperTest, self).__init__(*args, **kwargs)
+
+ def setUp(self):
+ self.dw = DutWrapper(
+ "/tmp/chromeos",
+ "lumpy.cros2",
+ log_level="verbose",
+ logger=self.mock_logger,
+ ce=self.mock_cmd_exec,
+ dut_config={},
+ )
+
+ @mock.patch.object(
+ command_executer.CommandExecuter, "CrosRunCommandWOutput"
+ )
+ def test_run_command_on_dut(self, mock_cros_runcmd):
+ self.mock_cmd_exec.CrosRunCommandWOutput = mock_cros_runcmd
+ mock_cros_runcmd.return_value = (0, "", "")
+ mock_cros_runcmd.assert_not_called()
+ self.dw.RunCommandOnDut("run command;")
+ mock_cros_runcmd.assert_called_once_with(
+ "run command;", chromeos_root="/tmp/chromeos", machine="lumpy.cros2"
+ )
+
+ @mock.patch.object(
+ command_executer.CommandExecuter, "CrosRunCommandWOutput"
+ )
+ def test_dut_wrapper_fatal_error(self, mock_cros_runcmd):
+ self.mock_cmd_exec.CrosRunCommandWOutput = mock_cros_runcmd
+ # Command returns error 1.
+ mock_cros_runcmd.return_value = (1, "", "Error!")
+ mock_cros_runcmd.assert_not_called()
+ self.dw.RunCommandOnDut("run command;")
+ mock_cros_runcmd.assert_called_once_with(
+ "run command;", chromeos_root="/tmp/chromeos", machine="lumpy.cros2"
+ )
+ # Error status causes log fatal.
+ self.assertEqual(
+ self.mock_logger.method_calls[-1],
+ mock.call.LogFatal(
+ "Command execution on DUT lumpy.cros2 failed.\n"
+ "Failing command: run command;\nreturned 1\n"
+ "Error message: Error!"
+ ),
+ )
+
+ @mock.patch.object(
+ command_executer.CommandExecuter, "CrosRunCommandWOutput"
+ )
+ def test_dut_wrapper_ignore_error(self, mock_cros_runcmd):
+ self.mock_cmd_exec.CrosRunCommandWOutput = mock_cros_runcmd
+ # Command returns error 1.
+ mock_cros_runcmd.return_value = (1, "", "Error!")
+ self.dw.RunCommandOnDut("run command;", ignore_status=True)
+ mock_cros_runcmd.assert_called_once_with(
+ "run command;", chromeos_root="/tmp/chromeos", machine="lumpy.cros2"
+ )
+ # Error status is not fatal. LogError records the error message.
+ self.assertEqual(
+ self.mock_logger.method_calls[-1],
+ mock.call.LogError(
+ "Command execution on DUT lumpy.cros2 failed.\n"
+ "Failing command: run command;\nreturned 1\n"
+ "Error message: Error!\n"
+ "(Failure is considered non-fatal. Continue.)"
+ ),
+ )
+
+ def test_disable_aslr(self):
+ self.dw.RunCommandOnDut = mock.Mock(return_value=(0, "", ""))
+ self.dw.DisableASLR()
+ # pyformat: disable
+ set_cpu_cmd = (
+ "set -e; "
+ "if [[ -e /proc/sys/kernel/randomize_va_space ]]; then "
+ " echo 0 > /proc/sys/kernel/randomize_va_space; "
+ "fi"
+ )
+ self.dw.RunCommandOnDut.assert_called_once_with(set_cpu_cmd)
+
+ def test_set_cpu_governor(self):
+ self.dw.RunCommandOnDut = mock.Mock(return_value=(0, "", ""))
+ self.dw.SetCpuGovernor("new_governor", ignore_status=False)
+ set_cpu_cmd = (
+ "for f in `ls -d /sys/devices/system/cpu/cpu*/cpufreq 2>/dev/null`; do "
+ # Skip writing scaling_governor if cpu is offline.
+ " [[ -e ${f/cpufreq/online} ]] && grep -q 0 ${f/cpufreq/online} "
+ " && continue; "
+ " cd $f; "
+ " if [[ -e scaling_governor ]]; then "
+ " echo %s > scaling_governor; fi; "
+ "done; "
+ )
+ self.dw.RunCommandOnDut.assert_called_once_with(
+ set_cpu_cmd % "new_governor", ignore_status=False
+ )
+
+ def test_set_cpu_governor_propagate_error(self):
+ self.dw.RunCommandOnDut = mock.Mock(return_value=(1, "", "Error."))
+ self.dw.SetCpuGovernor("non-exist_governor")
+ set_cpu_cmd = (
+ "for f in `ls -d /sys/devices/system/cpu/cpu*/cpufreq 2>/dev/null`; do "
+ # Skip writing scaling_governor if cpu is not online.
+ " [[ -e ${f/cpufreq/online} ]] && grep -q 0 ${f/cpufreq/online} "
+ " && continue; "
+ " cd $f; "
+ " if [[ -e scaling_governor ]]; then "
+ " echo %s > scaling_governor; fi; "
+ "done; "
+ )
+ # By default error status is fatal.
+ self.dw.RunCommandOnDut.assert_called_once_with(
+ set_cpu_cmd % "non-exist_governor", ignore_status=False
+ )
+
+ def test_set_cpu_governor_ignore_status(self):
+ self.dw.RunCommandOnDut = mock.Mock(return_value=(1, "", "Error."))
+ ret_code = self.dw.SetCpuGovernor(
+ "non-exist_governor", ignore_status=True
+ )
+ set_cpu_cmd = (
+ "for f in `ls -d /sys/devices/system/cpu/cpu*/cpufreq 2>/dev/null`; do "
+ # Skip writing scaling_governor if cpu is not online.
+ " [[ -e ${f/cpufreq/online} ]] && grep -q 0 ${f/cpufreq/online} "
+ " && continue; "
+ " cd $f; "
+ " if [[ -e scaling_governor ]]; then "
+ " echo %s > scaling_governor; fi; "
+ "done; "
+ )
+ self.dw.RunCommandOnDut.assert_called_once_with(
+ set_cpu_cmd % "non-exist_governor", ignore_status=True
+ )
+ self.assertEqual(ret_code, 1)
+
+ def test_disable_turbo(self):
+ self.dw.RunCommandOnDut = mock.Mock(return_value=(0, "", ""))
+ self.dw.DisableTurbo()
+ set_cpu_cmd = (
+ # Disable Turbo in Intel pstate driver
+ "if [[ -e /sys/devices/system/cpu/intel_pstate/no_turbo ]]; then "
+ " if grep -q 0 /sys/devices/system/cpu/intel_pstate/no_turbo; then "
+ " echo -n 1 > /sys/devices/system/cpu/intel_pstate/no_turbo; "
+ " fi; "
+ "fi; "
+ )
+ self.dw.RunCommandOnDut.assert_called_once_with(set_cpu_cmd)
+
+ def test_get_cpu_online_two(self):
+ """Test one digit CPU #."""
+ self.dw.RunCommandOnDut = mock.Mock(
+ return_value=(
+ 0,
+ "/sys/devices/system/cpu/cpu0/online 0\n"
+ "/sys/devices/system/cpu/cpu1/online 1\n",
+ "",
+ )
+ )
+ cpu_online = self.dw.GetCpuOnline()
+ self.assertEqual(cpu_online, {0: 0, 1: 1})
+
+ def test_get_cpu_online_twelve(self):
+ """Test two digit CPU #."""
+ self.dw.RunCommandOnDut = mock.Mock(
+ return_value=(
+ 0,
+ "/sys/devices/system/cpu/cpu0/online 1\n"
+ "/sys/devices/system/cpu/cpu1/online 0\n"
+ "/sys/devices/system/cpu/cpu10/online 1\n"
+ "/sys/devices/system/cpu/cpu11/online 1\n"
+ "/sys/devices/system/cpu/cpu2/online 1\n"
+ "/sys/devices/system/cpu/cpu3/online 0\n"
+ "/sys/devices/system/cpu/cpu4/online 1\n"
+ "/sys/devices/system/cpu/cpu5/online 0\n"
+ "/sys/devices/system/cpu/cpu6/online 1\n"
+ "/sys/devices/system/cpu/cpu7/online 0\n"
+ "/sys/devices/system/cpu/cpu8/online 1\n"
+ "/sys/devices/system/cpu/cpu9/online 0\n",
+ "",
+ )
+ )
+ cpu_online = self.dw.GetCpuOnline()
+ self.assertEqual(
+ cpu_online,
+ {
+ 0: 1,
+ 1: 0,
+ 2: 1,
+ 3: 0,
+ 4: 1,
+ 5: 0,
+ 6: 1,
+ 7: 0,
+ 8: 1,
+ 9: 0,
+ 10: 1,
+ 11: 1,
+ },
+ )
+
+ def test_get_cpu_online_no_output(self):
+ """Test error case, no output."""
+ self.dw.RunCommandOnDut = mock.Mock(return_value=(0, "", ""))
+ with self.assertRaises(AssertionError):
+ self.dw.GetCpuOnline()
+
+ def test_get_cpu_online_command_error(self):
+ """Test error case, command error."""
+ self.dw.RunCommandOnDut = mock.Mock(side_effect=AssertionError)
+ with self.assertRaises(AssertionError):
+ self.dw.GetCpuOnline()
+
+ @mock.patch.object(DutWrapper, "SetupArmCores")
+ def test_setup_cpu_usage_little_on_arm(self, mock_setup_arm):
+ self.dw.SetupArmCores = mock_setup_arm
+ self.dw.RunCommandOnDut = mock.Mock(return_value=(0, "armv7l", ""))
+ self.dw.dut_config["cpu_usage"] = "little_only"
+ self.dw.SetupCpuUsage()
+ self.dw.SetupArmCores.assert_called_once_with()
+
+ @mock.patch.object(DutWrapper, "SetupArmCores")
+ def test_setup_cpu_usage_big_on_aarch64(self, mock_setup_arm):
+ self.dw.SetupArmCores = mock_setup_arm
+ self.dw.RunCommandOnDut = mock.Mock(return_value=(0, "aarch64", ""))
+ self.dw.dut_config["cpu_usage"] = "big_only"
+ self.dw.SetupCpuUsage()
+ self.dw.SetupArmCores.assert_called_once_with()
+
+ @mock.patch.object(DutWrapper, "SetupArmCores")
+ def test_setup_cpu_usage_big_on_intel(self, mock_setup_arm):
+ self.dw.SetupArmCores = mock_setup_arm
+ self.dw.RunCommandOnDut = mock.Mock(return_value=(0, "x86_64", ""))
+ self.dw.dut_config["cpu_usage"] = "big_only"
+ self.dw.SetupCpuUsage()
+ # Check that SetupArmCores not called with invalid setup.
+ self.dw.SetupArmCores.assert_not_called()
+
+ @mock.patch.object(DutWrapper, "SetupArmCores")
+ def test_setup_cpu_usage_all_on_intel(self, mock_setup_arm):
+ self.dw.SetupArmCores = mock_setup_arm
+ self.dw.RunCommandOnDut = mock.Mock(return_value=(0, "x86_64", ""))
+ self.dw.dut_config["cpu_usage"] = "all"
+ self.dw.SetupCpuUsage()
+ # Check that SetupArmCores not called in general case.
+ self.dw.SetupArmCores.assert_not_called()
+
+ def test_setup_arm_cores_big_on_big_little(self):
+ self.dw.RunCommandOnDut = mock.Mock(
+ side_effect=[
+ (0, BIG_LITTLE_CPUINFO, ""),
+ (0, "", ""),
+ ]
+ )
+ self.dw.dut_config["cpu_usage"] = "big_only"
+ self.dw.SetupArmCores()
+ self.dw.RunCommandOnDut.assert_called_with(
+ "echo 1 | tee /sys/devices/system/cpu/cpu{2}/online; "
+ "echo 0 | tee /sys/devices/system/cpu/cpu{0,1}/online"
+ )
+
+ def test_setup_arm_cores_little_on_big_little(self):
+ self.dw.RunCommandOnDut = mock.Mock(
+ side_effect=[
+ (0, BIG_LITTLE_CPUINFO, ""),
+ (0, "", ""),
+ ]
+ )
+ self.dw.dut_config["cpu_usage"] = "little_only"
+ self.dw.SetupArmCores()
+ self.dw.RunCommandOnDut.assert_called_with(
+ "echo 1 | tee /sys/devices/system/cpu/cpu{0,1}/online; "
+ "echo 0 | tee /sys/devices/system/cpu/cpu{2}/online"
+ )
+
+ def test_setup_arm_cores_invalid_config(self):
+ self.dw.RunCommandOnDut = mock.Mock(
+ side_effect=[
+ (0, LITTLE_ONLY_CPUINFO, ""),
+ (0, "", ""),
+ ]
+ )
+ self.dw.dut_config["cpu_usage"] = "big_only"
+ self.dw.SetupArmCores()
+ # Check that setup command is not sent when trying
+ # to use 'big_only' on a platform with all little cores.
+ self.dw.RunCommandOnDut.assert_called_once_with("cat /proc/cpuinfo")
+
+ def test_setup_arm_cores_not_big_little(self):
+ self.dw.RunCommandOnDut = mock.Mock(
+ side_effect=[
+ (0, NOT_BIG_LITTLE_CPUINFO, ""),
+ (0, "", ""),
+ ]
+ )
+ self.dw.dut_config["cpu_usage"] = "big_only"
+ self.dw.SetupArmCores()
+ # Check that setup command is not sent when trying
+ # to use 'big_only' on a platform w/o support of big/little.
+ self.dw.RunCommandOnDut.assert_called_once_with("cat /proc/cpuinfo")
+
+ def test_setup_arm_cores_unsupported_cpu_usage(self):
+ self.dw.RunCommandOnDut = mock.Mock(
+ side_effect=[
+ (0, BIG_LITTLE_CPUINFO, ""),
+ (0, "", ""),
+ ]
+ )
+ self.dw.dut_config["cpu_usage"] = "exclusive_cores"
+ self.dw.SetupArmCores()
+ # Check that setup command is not sent when trying to use
+ # 'exclusive_cores' on ARM CPU setup.
+ self.dw.RunCommandOnDut.assert_called_once_with("cat /proc/cpuinfo")
+
+ def test_setup_cpu_freq_single_full(self):
+ online = [0]
+ self.dw.RunCommandOnDut = mock.Mock(
+ side_effect=[
+ (
+ 0,
+ "/sys/devices/system/cpu/cpu0/cpufreq/scaling_available_frequencies\n",
+ "",
+ ),
+ (0, "1 2 3 4 5 6 7 8 9 10", ""),
+ (0, "", ""),
+ ]
+ )
+ self.dw.dut_config["cpu_freq_pct"] = 100
+ self.dw.SetupCpuFreq(online)
+ self.assertGreaterEqual(self.dw.RunCommandOnDut.call_count, 3)
+ self.assertEqual(
+ self.dw.RunCommandOnDut.call_args,
+ mock.call(
+ "echo 10 | tee "
+ "/sys/devices/system/cpu/cpu0/cpufreq/scaling_max_freq "
+ "/sys/devices/system/cpu/cpu0/cpufreq/scaling_min_freq"
+ ),
+ )
+
+ def test_setup_cpu_freq_middle(self):
+ online = [0]
+ self.dw.RunCommandOnDut = mock.Mock(
+ side_effect=[
+ (
+ 0,
+ "/sys/devices/system/cpu/cpu0/cpufreq/scaling_available_frequencies\n",
+ "",
+ ),
+ (0, "1 2 3 4 5 6 7 8 9 10", ""),
+ (0, "", ""),
+ ]
+ )
+ self.dw.dut_config["cpu_freq_pct"] = 60
+ self.dw.SetupCpuFreq(online)
+ self.assertGreaterEqual(self.dw.RunCommandOnDut.call_count, 2)
+ self.assertEqual(
+ self.dw.RunCommandOnDut.call_args,
+ mock.call(
+ "echo 6 | tee "
+ "/sys/devices/system/cpu/cpu0/cpufreq/scaling_max_freq "
+ "/sys/devices/system/cpu/cpu0/cpufreq/scaling_min_freq"
+ ),
+ )
+
+ def test_setup_cpu_freq_lowest(self):
+ online = [0]
+ self.dw.RunCommandOnDut = mock.Mock(
+ side_effect=[
+ (
+ 0,
+ "/sys/devices/system/cpu/cpu0/cpufreq/scaling_available_frequencies\n",
+ "",
+ ),
+ (0, "1 2 3 4 5 6 7 8 9 10", ""),
+ (0, "", ""),
+ ]
+ )
+ self.dw.dut_config["cpu_freq_pct"] = 0
+ self.dw.SetupCpuFreq(online)
+ self.assertGreaterEqual(self.dw.RunCommandOnDut.call_count, 2)
+ self.assertEqual(
+ self.dw.RunCommandOnDut.call_args,
+ mock.call(
+ "echo 1 | tee "
+ "/sys/devices/system/cpu/cpu0/cpufreq/scaling_max_freq "
+ "/sys/devices/system/cpu/cpu0/cpufreq/scaling_min_freq"
+ ),
+ )
+
+ def test_setup_cpu_freq_multiple_middle(self):
+ online = [0, 1]
+ self.dw.RunCommandOnDut = mock.Mock(
+ side_effect=[
+ (
+ 0,
+ "/sys/devices/system/cpu/cpu0/cpufreq/scaling_available_frequencies\n"
+ "/sys/devices/system/cpu/cpu1/cpufreq/scaling_available_frequencies\n",
+ "",
+ ),
+ (0, "1 2 3 4 5 6 7 8 9 10", ""),
+ (0, "", ""),
+ (0, "1 4 6 8 10 12 14 16 18 20", ""),
+ (0, "", ""),
+ ]
+ )
+ self.dw.dut_config["cpu_freq_pct"] = 70
+ self.dw.SetupCpuFreq(online)
+ self.assertEqual(self.dw.RunCommandOnDut.call_count, 5)
+ self.assertEqual(
+ self.dw.RunCommandOnDut.call_args_list[2],
+ mock.call(
+ "echo 7 | tee "
+ "/sys/devices/system/cpu/cpu0/cpufreq/scaling_max_freq "
+ "/sys/devices/system/cpu/cpu0/cpufreq/scaling_min_freq"
+ ),
+ )
+ self.assertEqual(
+ self.dw.RunCommandOnDut.call_args_list[4],
+ mock.call(
+ "echo 14 | tee "
+ "/sys/devices/system/cpu/cpu1/cpufreq/scaling_max_freq "
+ "/sys/devices/system/cpu/cpu1/cpufreq/scaling_min_freq"
+ ),
+ )
+
+ def test_setup_cpu_freq_no_scaling_available(self):
+ online = [0, 1]
+ self.dw.RunCommandOnDut = mock.Mock(
+ return_value=(2, "", "No such file or directory")
+ )
+ self.dw.dut_config["cpu_freq_pct"] = 50
+ self.dw.SetupCpuFreq(online)
+ self.dw.RunCommandOnDut.assert_called_once()
+ self.assertNotRegex(
+ self.dw.RunCommandOnDut.call_args_list[0][0][0],
+ "^echo.*scaling_max_freq$",
+ )
+
+ def test_setup_cpu_freq_multiple_no_access(self):
+ online = [0, 1]
+ self.dw.RunCommandOnDut = mock.Mock(
+ side_effect=[
+ (
+ 0,
+ "/sys/devices/system/cpu/cpu0/cpufreq/scaling_available_frequencies\n"
+ "/sys/devices/system/cpu/cpu1/cpufreq/scaling_available_frequencies\n",
+ "",
+ ),
+ (0, "1 4 6 8 10 12 14 16 18 20", ""),
+ AssertionError(),
+ ]
+ )
+ self.dw.dut_config["cpu_freq_pct"] = 30
+ # Error status causes log fatal.
+ with self.assertRaises(AssertionError):
+ self.dw.SetupCpuFreq(online)
+
+ @mock.patch.object(time, "sleep")
+ def test_wait_cooldown_nowait(self, mock_sleep):
+ mock_sleep.return_value = 0
+ self.dw.RunCommandOnDut = mock.Mock(return_value=(0, "39000", ""))
+ self.dw.dut_config["cooldown_time"] = 10
+ self.dw.dut_config["cooldown_temp"] = 40
+ wait_time = self.dw.WaitCooldown()
+ # Send command to DUT only once to check temperature
+ # and make sure it does not exceed the threshold.
+ self.dw.RunCommandOnDut.assert_called_once()
+ mock_sleep.assert_not_called()
+ self.assertEqual(wait_time, 0)
+
+ @mock.patch.object(time, "sleep")
+ def test_wait_cooldown_needwait_once(self, mock_sleep):
+ """Wait one iteration for cooldown.
+
+ Set a large enough timeout and a changing temperature
+ output. Make sure it exits once the expected value is
+ received.
+ Expect WaitCooldown to check the temperature twice.
+ """
+ mock_sleep.return_value = 0
+ self.dw.RunCommandOnDut = mock.Mock(
+ side_effect=[(0, "41000", ""), (0, "39999", "")]
+ )
+ self.dw.dut_config["cooldown_time"] = 100
+ self.dw.dut_config["cooldown_temp"] = 40
+ wait_time = self.dw.WaitCooldown()
+ self.dw.RunCommandOnDut.assert_called()
+ self.assertEqual(self.dw.RunCommandOnDut.call_count, 2)
+ mock_sleep.assert_called()
+ self.assertGreater(wait_time, 0)
+
+ @mock.patch.object(time, "sleep")
+ def test_wait_cooldown_needwait(self, mock_sleep):
+ """Test exit by timeout.
+
+ Send the temperature-check command to the DUT
+ repeatedly until the timeout expires.
+ The output from the temperature sensor never changes.
+ """
+ mock_sleep.return_value = 0
+ self.dw.RunCommandOnDut = mock.Mock(return_value=(0, "41000", ""))
+ self.dw.dut_config["cooldown_time"] = 60
+ self.dw.dut_config["cooldown_temp"] = 40
+ wait_time = self.dw.WaitCooldown()
+ self.dw.RunCommandOnDut.assert_called()
+ self.assertGreater(self.dw.RunCommandOnDut.call_count, 2)
+ mock_sleep.assert_called()
+ self.assertGreater(wait_time, 0)
+
+ @mock.patch.object(time, "sleep")
+ def test_wait_cooldown_needwait_multtemp(self, mock_sleep):
+ """Wait until all temps go down.
+
+ Set a large enough timeout and a changing temperature
+ output. Make sure it exits once the expected values are
+ received for all temperatures.
+ Expect 3 checks.
+ """
+ mock_sleep.return_value = 0
+ self.dw.RunCommandOnDut = mock.Mock(
+ side_effect=[
+ (0, "41000\n20000\n30000\n45000", ""),
+ (0, "39000\n20000\n30000\n41000", ""),
+ (0, "39000\n20000\n30000\n31000", ""),
+ ]
+ )
+ self.dw.dut_config["cooldown_time"] = 100
+ self.dw.dut_config["cooldown_temp"] = 40
+ wait_time = self.dw.WaitCooldown()
+ self.dw.RunCommandOnDut.assert_called()
+ self.assertEqual(self.dw.RunCommandOnDut.call_count, 3)
+ mock_sleep.assert_called()
+ self.assertGreater(wait_time, 0)
+
+ @mock.patch.object(time, "sleep")
+ def test_wait_cooldown_thermal_error(self, mock_sleep):
+ """Handle error status.
+
+ Any error should be considered non-fatal.
+ """
+ mock_sleep.return_value = 0
+ self.dw.RunCommandOnDut = mock.Mock(
+ side_effect=[
+ (1, "39000\n20000\n30000\n41000", "Thermal error"),
+ (1, "39000\n20000\n30000\n31000", "Thermal error"),
+ ]
+ )
+ self.dw.dut_config["cooldown_time"] = 10
+ self.dw.dut_config["cooldown_temp"] = 40
+ wait_time = self.dw.WaitCooldown()
+ # Check that errors are ignored.
+ self.dw.RunCommandOnDut.assert_called_with(
+ "cat /sys/class/thermal/thermal_zone*/temp", ignore_status=True
+ )
+ self.assertEqual(self.dw.RunCommandOnDut.call_count, 2)
+ # Check that we keep waiting even when an error is returned,
+ # as long as data keeps coming.
+ mock_sleep.assert_called()
+ self.assertGreater(wait_time, 0)
+
+ @mock.patch.object(time, "sleep")
+ def test_wait_cooldown_thermal_no_output(self, mock_sleep):
+ """Handle no output.
+
+ Check handling of empty stdout.
+ """
+ mock_sleep.return_value = 0
+ self.dw.RunCommandOnDut = mock.Mock(
+ side_effect=[(1, "", "Thermal error")]
+ )
+ self.dw.dut_config["cooldown_time"] = 10
+ self.dw.dut_config["cooldown_temp"] = 40
+ wait_time = self.dw.WaitCooldown()
+ # Check that errors are ignored.
+ self.dw.RunCommandOnDut.assert_called_once_with(
+ "cat /sys/class/thermal/thermal_zone*/temp", ignore_status=True
+ )
+ # No wait.
+ mock_sleep.assert_not_called()
+ self.assertEqual(wait_time, 0)
+
+ @mock.patch.object(time, "sleep")
+ def test_wait_cooldown_thermal_ws_output(self, mock_sleep):
+ """Handle whitespace output.
+
+ Check handling of whitespace only.
+ """
+ mock_sleep.return_value = 0
+ self.dw.RunCommandOnDut = mock.Mock(
+ side_effect=[(1, "\n", "Thermal error")]
+ )
+ self.dw.dut_config["cooldown_time"] = 10
+ self.dw.dut_config["cooldown_temp"] = 40
+ wait_time = self.dw.WaitCooldown()
+ # Check that errors are ignored.
+ self.dw.RunCommandOnDut.assert_called_once_with(
+ "cat /sys/class/thermal/thermal_zone*/temp", ignore_status=True
+ )
+ # No wait.
+ mock_sleep.assert_not_called()
+ self.assertEqual(wait_time, 0)
+
+ def test_stop_ui(self):
+ self.dw.RunCommandOnDut = mock.Mock(return_value=(0, "", ""))
+ self.dw.StopUI()
+ self.dw.RunCommandOnDut.assert_called_once_with(
+ "stop ui", ignore_status=True
+ )
+
+ def test_start_ui(self):
+ self.dw.RunCommandOnDut = mock.Mock(return_value=(0, "", ""))
+ self.dw.StartUI()
+ self.dw.RunCommandOnDut.assert_called_once_with(
+ "start ui", ignore_status=True
+ )
+
+ def test_setup_device(self):
+ def FakeRunner(command, ignore_status=False):
+ # pylint fix for unused variable.
+ del command, ignore_status
+ return 0, "", ""
+
+ def SetupMockFunctions():
+ self.dw.RunCommandOnDut = mock.Mock(return_value=FakeRunner)
+ self.dw.KerncmdUpdateNeeded = mock.Mock(return_value=True)
+ self.dw.UpdateKerncmdIntelPstate = mock.Mock(return_value=0)
+ self.dw.DisableASLR = mock.Mock(return_value=0)
+ self.dw.SetupCpuUsage = mock.Mock(return_value=0)
+ self.dw.SetupCpuFreq = mock.Mock(return_value=0)
+ self.dw.GetCpuOnline = mock.Mock(return_value={0: 1, 1: 1, 2: 0})
+ self.dw.SetCpuGovernor = mock.Mock(return_value=0)
+ self.dw.DisableTurbo = mock.Mock(return_value=0)
+ self.dw.StopUI = mock.Mock(return_value=0)
+ self.dw.StartUI = mock.Mock(return_value=0)
+ self.dw.WaitCooldown = mock.Mock(return_value=0)
+ self.dw.DecreaseWaitTime = mock.Mock(return_value=0)
+
+ self.dw.dut_config["enable_aslr"] = False
+ self.dw.dut_config["cooldown_time"] = 0
+ self.dw.dut_config["governor"] = "fake_governor"
+ self.dw.dut_config["cpu_freq_pct"] = 65
+ self.dw.dut_config["intel_pstate"] = "no_hwp"
+
+ SetupMockFunctions()
+ self.dw.SetupDevice()
+
+ self.dw.KerncmdUpdateNeeded.assert_called_once()
+ self.dw.UpdateKerncmdIntelPstate.assert_called_once()
+ self.dw.DisableASLR.assert_called_once()
+ self.dw.SetupCpuUsage.assert_called_once_with()
+ self.dw.SetupCpuFreq.assert_called_once_with([0, 1])
+ self.dw.GetCpuOnline.assert_called_once_with()
+ self.dw.SetCpuGovernor.assert_called_once_with("fake_governor")
+ self.dw.DisableTurbo.assert_called_once_with()
+ self.dw.DecreaseWaitTime.assert_called_once_with()
+ self.dw.StopUI.assert_called_once_with()
+ self.dw.StartUI.assert_called_once_with()
+ self.dw.WaitCooldown.assert_not_called()
+
+ # Test SetupDevice with cooldown
+ self.dw.dut_config["cooldown_time"] = 10
+
+ SetupMockFunctions()
+ self.dw.GetCpuOnline = mock.Mock(return_value={0: 0, 1: 1})
+
+ self.dw.SetupDevice()
+
+ self.dw.WaitCooldown.assert_called_once_with()
+ self.dw.DisableASLR.assert_called_once()
+ self.dw.DisableTurbo.assert_called_once_with()
+ self.dw.SetupCpuUsage.assert_called_once_with()
+ self.dw.SetupCpuFreq.assert_called_once_with([1])
+ self.dw.SetCpuGovernor.assert_called()
+ self.dw.GetCpuOnline.assert_called_once_with()
+ self.dw.StopUI.assert_called_once_with()
+ self.dw.StartUI.assert_called_once_with()
+ self.assertGreater(self.dw.SetCpuGovernor.call_count, 1)
+ self.assertEqual(
+ self.dw.SetCpuGovernor.call_args, mock.call("fake_governor")
+ )
+
+        # Test SetupDevice when SetupCpuUsage raises an exception.
+ SetupMockFunctions()
+ self.dw.SetupCpuUsage = mock.Mock(side_effect=RuntimeError())
+
+ with self.assertRaises(RuntimeError):
+ self.dw.SetupDevice()
+
+ # This call injected an exception.
+ self.dw.SetupCpuUsage.assert_called_once_with()
+        # Calls following the exception are skipped.
+ self.dw.WaitCooldown.assert_not_called()
+ self.dw.DisableTurbo.assert_not_called()
+ self.dw.SetupCpuFreq.assert_not_called()
+ self.dw.SetCpuGovernor.assert_not_called()
+ self.dw.GetCpuOnline.assert_not_called()
+ # Check that Stop/Start UI are always called.
+ self.dw.StopUI.assert_called_once_with()
+ self.dw.StartUI.assert_called_once_with()
+
+
+if __name__ == "__main__":
+ unittest.main()
diff --git a/cros_utils/email_sender.py b/cros_utils/email_sender.py
index df8afbc4..ccf4c1b4 100755
--- a/cros_utils/email_sender.py
+++ b/cros_utils/email_sender.py
@@ -1,259 +1,314 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
-# Copyright 2019 The Chromium OS Authors. All rights reserved.
+# Copyright 2019 The ChromiumOS Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Utilities to send email either through SMTP or SendGMR."""
-from __future__ import print_function
import base64
import contextlib
import datetime
+from email import encoders as Encoders
+from email.mime.base import MIMEBase
+from email.mime.multipart import MIMEMultipart
+from email.mime.text import MIMEText
import getpass
import json
import os
import smtplib
import tempfile
-from email import encoders as Encoders
-from email.mime.base import MIMEBase
-from email.mime.multipart import MIMEMultipart
-from email.mime.text import MIMEText
from cros_utils import command_executer
-X20_PATH = '/google/data/rw/teams/c-compiler-chrome/prod_emails'
+
+X20_PATH = "/google/data/rw/teams/c-compiler-chrome/prod_emails"
@contextlib.contextmanager
def AtomicallyWriteFile(file_path):
- temp_path = file_path + '.in_progress'
- try:
- with open(temp_path, 'w') as f:
- yield f
- os.rename(temp_path, file_path)
- except:
- os.remove(temp_path)
- raise
+ temp_path = file_path + ".in_progress"
+ try:
+ with open(temp_path, "w") as f:
+ yield f
+ os.rename(temp_path, file_path)
+ except:
+ os.remove(temp_path)
+ raise
class EmailSender(object):
- """Utility class to send email through SMTP or SendGMR."""
-
- class Attachment(object):
- """Small class to keep track of attachment info."""
-
- def __init__(self, name, content):
- self.name = name
- self.content = content
-
- def SendX20Email(self,
- subject,
- identifier,
- well_known_recipients=(),
- direct_recipients=(),
- text_body=None,
- html_body=None):
- """Enqueues an email in our x20 outbox.
-
- These emails ultimately get sent by the machinery in
- //depot/google3/googleclient/chrome/chromeos_toolchain/mailer/mail.go. This
- kind of sending is intended for accounts that don't have smtp or gmr access
- (e.g., role accounts), but can be used by anyone with x20 access.
-
- All emails are sent from `mdb.c-compiler-chrome+${identifier}@google.com`.
-
- Args:
- subject: email subject. Must be nonempty.
- identifier: email identifier, or the text that lands after the `+` in the
- "From" email address. Must be nonempty.
- well_known_recipients: a list of well-known recipients for the email.
- These are translated into addresses by our mailer.
- Current potential values for this are ('detective',
- 'cwp-team', 'cros-team', 'mage'). Either this or
- direct_recipients must be a nonempty list.
- direct_recipients: @google.com emails to send addresses to. Either this
- or well_known_recipients must be a nonempty list.
- text_body: a 'text/plain' email body to send. Either this or html_body
- must be a nonempty string. Both may be specified
- html_body: a 'text/html' email body to send. Either this or text_body
- must be a nonempty string. Both may be specified
- """
- # `str`s act a lot like tuples/lists. Ensure that we're not accidentally
- # iterating over one of those (or anything else that's sketchy, for that
- # matter).
- if not isinstance(well_known_recipients, (tuple, list)):
- raise ValueError('`well_known_recipients` is unexpectedly a %s' %
- type(well_known_recipients))
-
- if not isinstance(direct_recipients, (tuple, list)):
- raise ValueError('`direct_recipients` is unexpectedly a %s' %
- type(direct_recipients))
-
- if not subject or not identifier:
- raise ValueError('both `subject` and `identifier` must be nonempty')
-
- if not (well_known_recipients or direct_recipients):
- raise ValueError('either `well_known_recipients` or `direct_recipients` '
- 'must be specified')
-
- for recipient in direct_recipients:
- if not recipient.endswith('@google.com'):
- raise ValueError('All recipients must end with @google.com')
-
- if not (text_body or html_body):
- raise ValueError('either `text_body` or `html_body` must be specified')
-
- email_json = {
- 'email_identifier': identifier,
- 'subject': subject,
- }
-
- if well_known_recipients:
- email_json['well_known_recipients'] = well_known_recipients
-
- if direct_recipients:
- email_json['direct_recipients'] = direct_recipients
-
- if text_body:
- email_json['body'] = text_body
-
- if html_body:
- email_json['html_body'] = html_body
-
- # The name of this has two parts:
- # - An easily sortable time, to provide uniqueness and let our emailer
- # send things in the order they were put into the outbox.
- # - 64 bits of entropy, so two racing email sends don't clobber the same
- # file.
- now = datetime.datetime.utcnow().isoformat('T', 'seconds') + 'Z'
- entropy = base64.urlsafe_b64encode(os.getrandom(8))
- entropy_str = entropy.rstrip(b'=').decode('utf-8')
- result_path = os.path.join(X20_PATH, now + '_' + entropy_str + '.json')
-
- with AtomicallyWriteFile(result_path) as f:
- json.dump(email_json, f)
-
- def SendEmail(self,
+ """Utility class to send email through SMTP or SendGMR."""
+
+ class Attachment(object):
+ """Small class to keep track of attachment info."""
+
+ def __init__(self, name, content):
+ self.name = name
+ self.content = content
+
+ def SendX20Email(
+ self,
+ subject,
+ identifier,
+ well_known_recipients=(),
+ direct_recipients=(),
+ text_body=None,
+ html_body=None,
+ ):
+ """Enqueues an email in our x20 outbox.
+
+ These emails ultimately get sent by the machinery in
+ //depot/google3/googleclient/chrome/chromeos_toolchain/mailer/mail.go. This
+ kind of sending is intended for accounts that don't have smtp or gmr access
+ (e.g., role accounts), but can be used by anyone with x20 access.
+
+ All emails are sent from `mdb.c-compiler-chrome+${identifier}@google.com`.
+
+ Args:
+ subject: email subject. Must be nonempty.
+ identifier: email identifier, or the text that lands after the `+` in the
+ "From" email address. Must be nonempty.
+ well_known_recipients: a list of well-known recipients for the email.
+ These are translated into addresses by our mailer.
+ Current potential values for this are ('detective',
+ 'cwp-team', 'cros-team', 'mage'). Either this or
+ direct_recipients must be a nonempty list.
+          direct_recipients: @google.com addresses to send the email to.
+            Either this or well_known_recipients must be a nonempty list.
+          text_body: a 'text/plain' email body to send. Either this or
+            html_body must be a nonempty string. Both may be specified.
+          html_body: a 'text/html' email body to send. Either this or
+            text_body must be a nonempty string. Both may be specified.
+ """
+ # `str`s act a lot like tuples/lists. Ensure that we're not accidentally
+ # iterating over one of those (or anything else that's sketchy, for that
+ # matter).
+ if not isinstance(well_known_recipients, (tuple, list)):
+ raise ValueError(
+ "`well_known_recipients` is unexpectedly a %s"
+ % type(well_known_recipients)
+ )
+
+ if not isinstance(direct_recipients, (tuple, list)):
+ raise ValueError(
+ "`direct_recipients` is unexpectedly a %s"
+ % type(direct_recipients)
+ )
+
+ if not subject or not identifier:
+ raise ValueError("both `subject` and `identifier` must be nonempty")
+
+ if not (well_known_recipients or direct_recipients):
+ raise ValueError(
+ "either `well_known_recipients` or `direct_recipients` "
+ "must be specified"
+ )
+
+ for recipient in direct_recipients:
+ if not recipient.endswith("@google.com"):
+ raise ValueError("All recipients must end with @google.com")
+
+ if not (text_body or html_body):
+ raise ValueError(
+ "either `text_body` or `html_body` must be specified"
+ )
+
+ email_json = {
+ "email_identifier": identifier,
+ "subject": subject,
+ }
+
+ if well_known_recipients:
+ email_json["well_known_recipients"] = well_known_recipients
+
+ if direct_recipients:
+ email_json["direct_recipients"] = direct_recipients
+
+ if text_body:
+ email_json["body"] = text_body
+
+ if html_body:
+ email_json["html_body"] = html_body
+
+ # The name of this has two parts:
+ # - An easily sortable time, to provide uniqueness and let our emailer
+ # send things in the order they were put into the outbox.
+ # - 64 bits of entropy, so two racing email sends don't clobber the same
+ # file.
+ now = datetime.datetime.utcnow().isoformat("T", "seconds") + "Z"
+ entropy = base64.urlsafe_b64encode(os.getrandom(8))
+ entropy_str = entropy.rstrip(b"=").decode("utf-8")
+ result_path = os.path.join(X20_PATH, now + "_" + entropy_str + ".json")
+
+ with AtomicallyWriteFile(result_path) as f:
+ json.dump(email_json, f)
+
+ def SendEmail(
+ self,
+ email_to,
+ subject,
+ text_to_send,
+ email_cc=None,
+ email_bcc=None,
+ email_from=None,
+ msg_type="plain",
+ attachments=None,
+ ):
+ """Choose appropriate email method and call it."""
+ if os.path.exists("/usr/bin/sendgmr"):
+ self.SendGMREmail(
email_to,
subject,
text_to_send,
- email_cc=None,
- email_bcc=None,
- email_from=None,
- msg_type='plain',
- attachments=None):
- """Choose appropriate email method and call it."""
- if os.path.exists('/usr/bin/sendgmr'):
- self.SendGMREmail(email_to, subject, text_to_send, email_cc, email_bcc,
- email_from, msg_type, attachments)
- else:
- self.SendSMTPEmail(email_to, subject, text_to_send, email_cc, email_bcc,
- email_from, msg_type, attachments)
-
- def SendSMTPEmail(self, email_to, subject, text_to_send, email_cc, email_bcc,
- email_from, msg_type, attachments):
- """Send email via standard smtp mail."""
- # Email summary to the current user.
- msg = MIMEMultipart()
-
- if not email_from:
- email_from = os.path.basename(__file__)
-
- msg['To'] = ','.join(email_to)
- msg['Subject'] = subject
-
- if email_from:
- msg['From'] = email_from
- if email_cc:
- msg['CC'] = ','.join(email_cc)
- email_to += email_cc
- if email_bcc:
- msg['BCC'] = ','.join(email_bcc)
- email_to += email_bcc
-
- msg.attach(MIMEText(text_to_send, msg_type))
- if attachments:
- for attachment in attachments:
- part = MIMEBase('application', 'octet-stream')
- part.set_payload(attachment.content)
- Encoders.encode_base64(part)
- part.add_header('Content-Disposition',
- 'attachment; filename="%s"' % attachment.name)
- msg.attach(part)
-
- # Send the message via our own SMTP server, but don't include the
- # envelope header.
- s = smtplib.SMTP('localhost')
- s.sendmail(email_from, email_to, msg.as_string())
- s.quit()
-
- def SendGMREmail(self, email_to, subject, text_to_send, email_cc, email_bcc,
- email_from, msg_type, attachments):
- """Send email via sendgmr program."""
- ce = command_executer.GetCommandExecuter(log_level='none')
-
- if not email_from:
- email_from = getpass.getuser() + '@google.com'
-
- to_list = ','.join(email_to)
-
- if not text_to_send:
- text_to_send = 'Empty message body.'
-
- to_be_deleted = []
- try:
- with tempfile.NamedTemporaryFile('w', encoding='utf-8',
- delete=False) as f:
- f.write(text_to_send)
- f.flush()
- to_be_deleted.append(f.name)
-
- # Fix single-quotes inside the subject. In bash, to escape a single quote
- # (e.g 'don't') you need to replace it with '\'' (e.g. 'don'\''t'). To
- # make Python read the backslash as a backslash rather than an escape
- # character, you need to double it. So...
- subject = subject.replace("'", "'\\''")
-
- if msg_type == 'html':
- command = ("sendgmr --to='%s' --from='%s' --subject='%s' "
- "--html_file='%s' --body_file=/dev/null" %
- (to_list, email_from, subject, f.name))
- else:
- command = ("sendgmr --to='%s' --from='%s' --subject='%s' "
- "--body_file='%s'" % (to_list, email_from, subject, f.name))
-
- if email_cc:
- cc_list = ','.join(email_cc)
- command += " --cc='%s'" % cc_list
- if email_bcc:
- bcc_list = ','.join(email_bcc)
- command += " --bcc='%s'" % bcc_list
-
- if attachments:
- attachment_files = []
- for attachment in attachments:
- if '<html>' in attachment.content:
- report_suffix = '_report.html'
- else:
- report_suffix = '_report.txt'
- with tempfile.NamedTemporaryFile('w',
- encoding='utf-8',
- delete=False,
- suffix=report_suffix) as f:
- f.write(attachment.content)
- f.flush()
- attachment_files.append(f.name)
- files = ','.join(attachment_files)
- command += " --attachment_files='%s'" % files
- to_be_deleted += attachment_files
-
- # Send the message via our own GMR server.
- status = ce.RunCommand(command)
- return status
-
- finally:
- for f in to_be_deleted:
- os.remove(f)
+ email_cc,
+ email_bcc,
+ email_from,
+ msg_type,
+ attachments,
+ )
+ else:
+ self.SendSMTPEmail(
+ email_to,
+ subject,
+ text_to_send,
+ email_cc,
+ email_bcc,
+ email_from,
+ msg_type,
+ attachments,
+ )
+
+ def SendSMTPEmail(
+ self,
+ email_to,
+ subject,
+ text_to_send,
+ email_cc,
+ email_bcc,
+ email_from,
+ msg_type,
+ attachments,
+ ):
+ """Send email via standard smtp mail."""
+ # Email summary to the current user.
+ msg = MIMEMultipart()
+
+ if not email_from:
+ email_from = os.path.basename(__file__)
+
+ msg["To"] = ",".join(email_to)
+ msg["Subject"] = subject
+
+ if email_from:
+ msg["From"] = email_from
+ if email_cc:
+ msg["CC"] = ",".join(email_cc)
+ email_to += email_cc
+ if email_bcc:
+ msg["BCC"] = ",".join(email_bcc)
+ email_to += email_bcc
+
+ msg.attach(MIMEText(text_to_send, msg_type))
+ if attachments:
+ for attachment in attachments:
+ part = MIMEBase("application", "octet-stream")
+ part.set_payload(attachment.content)
+ Encoders.encode_base64(part)
+ part.add_header(
+ "Content-Disposition",
+ 'attachment; filename="%s"' % attachment.name,
+ )
+ msg.attach(part)
+
+ # Send the message via our own SMTP server, but don't include the
+ # envelope header.
+ s = smtplib.SMTP("localhost")
+ s.sendmail(email_from, email_to, msg.as_string())
+ s.quit()
+
+ def SendGMREmail(
+ self,
+ email_to,
+ subject,
+ text_to_send,
+ email_cc,
+ email_bcc,
+ email_from,
+ msg_type,
+ attachments,
+ ):
+ """Send email via sendgmr program."""
+ ce = command_executer.GetCommandExecuter(log_level="none")
+
+ if not email_from:
+ email_from = getpass.getuser() + "@google.com"
+
+ to_list = ",".join(email_to)
+
+ if not text_to_send:
+ text_to_send = "Empty message body."
+
+ to_be_deleted = []
+ try:
+ with tempfile.NamedTemporaryFile(
+ "w", encoding="utf-8", delete=False
+ ) as f:
+ f.write(text_to_send)
+ f.flush()
+ to_be_deleted.append(f.name)
+
+ # Fix single-quotes inside the subject. In bash, to escape a single quote
+            # (e.g. 'don't') you need to replace it with '\'' (e.g. 'don'\''t'). To
+ # make Python read the backslash as a backslash rather than an escape
+ # character, you need to double it. So...
+ subject = subject.replace("'", "'\\''")
+
+ if msg_type == "html":
+ command = (
+ "sendgmr --to='%s' --from='%s' --subject='%s' "
+ "--html_file='%s' --body_file=/dev/null"
+ % (to_list, email_from, subject, f.name)
+ )
+ else:
+ command = (
+ "sendgmr --to='%s' --from='%s' --subject='%s' "
+ "--body_file='%s'" % (to_list, email_from, subject, f.name)
+ )
+
+ if email_cc:
+ cc_list = ",".join(email_cc)
+ command += " --cc='%s'" % cc_list
+ if email_bcc:
+ bcc_list = ",".join(email_bcc)
+ command += " --bcc='%s'" % bcc_list
+
+ if attachments:
+ attachment_files = []
+ for attachment in attachments:
+ if "<html>" in attachment.content:
+ report_suffix = "_report.html"
+ else:
+ report_suffix = "_report.txt"
+ with tempfile.NamedTemporaryFile(
+ "w",
+ encoding="utf-8",
+ delete=False,
+ suffix=report_suffix,
+ ) as f:
+ f.write(attachment.content)
+ f.flush()
+ attachment_files.append(f.name)
+ files = ",".join(attachment_files)
+ command += " --attachment_files='%s'" % files
+ to_be_deleted += attachment_files
+
+ # Send the message via our own GMR server.
+ status = ce.RunCommand(command)
+ return status
+
+ finally:
+ for f in to_be_deleted:
+ os.remove(f)
diff --git a/cros_utils/email_sender_unittest.py b/cros_utils/email_sender_unittest.py
index ae41f143..66ec6a2d 100755
--- a/cros_utils/email_sender_unittest.py
+++ b/cros_utils/email_sender_unittest.py
@@ -1,13 +1,12 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
-# Copyright 2020 The Chromium OS Authors. All rights reserved.
+# Copyright 2020 The ChromiumOS Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Tests for email_sender."""
-from __future__ import print_function
import contextlib
import io
@@ -19,102 +18,105 @@ import cros_utils.email_sender as email_sender
class Test(unittest.TestCase):
- """Tests for email_sender."""
-
- @mock.patch('cros_utils.email_sender.AtomicallyWriteFile')
- def test_x20_email_sending_rejects_invalid_inputs(self, write_file):
- test_cases = [
- {
- # no subject
- 'subject': '',
- 'identifier': 'foo',
- 'direct_recipients': ['gbiv@google.com'],
- 'text_body': 'hi',
- },
- {
- 'subject': 'foo',
- # no identifier
- 'identifier': '',
- 'direct_recipients': ['gbiv@google.com'],
- 'text_body': 'hi',
- },
- {
- 'subject': 'foo',
- 'identifier': 'foo',
- # no recipients
- 'direct_recipients': [],
- 'text_body': 'hi',
- },
- {
- 'subject': 'foo',
- 'identifier': 'foo',
- 'direct_recipients': ['gbiv@google.com'],
- # no body
- },
- {
- 'subject': 'foo',
- 'identifier': 'foo',
- # direct recipients lack @google.
- 'direct_recipients': ['gbiv'],
- 'text_body': 'hi',
- },
- {
- 'subject': 'foo',
- 'identifier': 'foo',
- # non-list recipients
- 'direct_recipients': 'gbiv@google.com',
- 'text_body': 'hi',
- },
- {
- 'subject': 'foo',
- 'identifier': 'foo',
- # non-list recipients
- 'well_known_recipients': 'detective',
- 'text_body': 'hi',
- },
- ]
-
- sender = email_sender.EmailSender()
- for case in test_cases:
- with self.assertRaises(ValueError):
- sender.SendX20Email(**case)
-
- write_file.assert_not_called()
-
- @mock.patch('cros_utils.email_sender.AtomicallyWriteFile')
- def test_x20_email_sending_translates_to_reasonable_json(self, write_file):
- written_obj = None
-
- @contextlib.contextmanager
- def actual_write_file(file_path):
- nonlocal written_obj
-
- self.assertTrue(file_path.startswith(email_sender.X20_PATH + '/'),
- file_path)
- f = io.StringIO()
- yield f
- written_obj = json.loads(f.getvalue())
-
- write_file.side_effect = actual_write_file
- email_sender.EmailSender().SendX20Email(
- subject='hello',
- identifier='world',
- well_known_recipients=['detective'],
- direct_recipients=['gbiv@google.com'],
- text_body='text',
- html_body='html',
- )
-
- self.assertEqual(
- written_obj, {
- 'subject': 'hello',
- 'email_identifier': 'world',
- 'well_known_recipients': ['detective'],
- 'direct_recipients': ['gbiv@google.com'],
- 'body': 'text',
- 'html_body': 'html',
- })
-
-
-if __name__ == '__main__':
- unittest.main()
+ """Tests for email_sender."""
+
+ @mock.patch("cros_utils.email_sender.AtomicallyWriteFile")
+ def test_x20_email_sending_rejects_invalid_inputs(self, write_file):
+ test_cases = [
+ {
+ # no subject
+ "subject": "",
+ "identifier": "foo",
+ "direct_recipients": ["gbiv@google.com"],
+ "text_body": "hi",
+ },
+ {
+ "subject": "foo",
+ # no identifier
+ "identifier": "",
+ "direct_recipients": ["gbiv@google.com"],
+ "text_body": "hi",
+ },
+ {
+ "subject": "foo",
+ "identifier": "foo",
+ # no recipients
+ "direct_recipients": [],
+ "text_body": "hi",
+ },
+ {
+ "subject": "foo",
+ "identifier": "foo",
+ "direct_recipients": ["gbiv@google.com"],
+ # no body
+ },
+ {
+ "subject": "foo",
+ "identifier": "foo",
+ # direct recipients lack @google.
+ "direct_recipients": ["gbiv"],
+ "text_body": "hi",
+ },
+ {
+ "subject": "foo",
+ "identifier": "foo",
+ # non-list recipients
+ "direct_recipients": "gbiv@google.com",
+ "text_body": "hi",
+ },
+ {
+ "subject": "foo",
+ "identifier": "foo",
+ # non-list recipients
+ "well_known_recipients": "detective",
+ "text_body": "hi",
+ },
+ ]
+
+ sender = email_sender.EmailSender()
+ for case in test_cases:
+ with self.assertRaises(ValueError):
+ sender.SendX20Email(**case)
+
+ write_file.assert_not_called()
+
+ @mock.patch("cros_utils.email_sender.AtomicallyWriteFile")
+ def test_x20_email_sending_translates_to_reasonable_json(self, write_file):
+ written_obj = None
+
+ @contextlib.contextmanager
+ def actual_write_file(file_path):
+ nonlocal written_obj
+
+ self.assertTrue(
+ file_path.startswith(email_sender.X20_PATH + "/"), file_path
+ )
+ f = io.StringIO()
+ yield f
+ written_obj = json.loads(f.getvalue())
+
+ write_file.side_effect = actual_write_file
+ email_sender.EmailSender().SendX20Email(
+ subject="hello",
+ identifier="world",
+ well_known_recipients=["detective"],
+ direct_recipients=["gbiv@google.com"],
+ text_body="text",
+ html_body="html",
+ )
+
+ self.assertEqual(
+ written_obj,
+ {
+ "subject": "hello",
+ "email_identifier": "world",
+ "well_known_recipients": ["detective"],
+ "direct_recipients": ["gbiv@google.com"],
+ "body": "text",
+ "html_body": "html",
+ },
+ )
+
+
+if __name__ == "__main__":
+ unittest.main()
diff --git a/cros_utils/file_utils.py b/cros_utils/file_utils.py
index f0e4064c..743edefa 100644
--- a/cros_utils/file_utils.py
+++ b/cros_utils/file_utils.py
@@ -1,11 +1,10 @@
# -*- coding: utf-8 -*-
-# Copyright 2019 The Chromium OS Authors. All rights reserved.
+# Copyright 2019 The ChromiumOS Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Utilities for operations on files."""
-from __future__ import print_function
import errno
import os
@@ -15,78 +14,83 @@ from cros_utils import command_executer
class FileUtils(object):
- """Utilities for operations on files."""
- _instance = None
- DRY_RUN = False
-
- @classmethod
- def Configure(cls, dry_run):
- cls.DRY_RUN = dry_run
-
- def __new__(cls, *args, **kwargs):
- if not cls._instance:
- if cls.DRY_RUN:
- cls._instance = super(FileUtils, cls).__new__(MockFileUtils, *args,
- **kwargs)
- else:
- cls._instance = super(FileUtils, cls).__new__(cls, *args, **kwargs)
- return cls._instance
-
- def Md5File(self, filename, log_level='verbose', _block_size=2**10):
- command = 'md5sum %s' % filename
- ce = command_executer.GetCommandExecuter(log_level=log_level)
- ret, out, _ = ce.RunCommandWOutput(command)
- if ret:
- raise RuntimeError('Could not run md5sum on: %s' % filename)
-
- return out.strip().split()[0]
-
- def CanonicalizeChromeOSRoot(self, chromeos_root):
- chromeos_root = os.path.expanduser(chromeos_root)
- if os.path.isdir(os.path.join(chromeos_root, 'chromite')):
- return chromeos_root
- else:
- return None
-
- def ChromeOSRootFromImage(self, chromeos_image):
- chromeos_root = os.path.join(
- os.path.dirname(chromeos_image), '../../../../..')
- return self.CanonicalizeChromeOSRoot(chromeos_root)
-
- def MkDirP(self, path):
- try:
- os.makedirs(path)
- except OSError as exc:
- if exc.errno == errno.EEXIST:
- pass
- else:
- raise
-
- def RmDir(self, path):
- shutil.rmtree(path, ignore_errors=True)
-
- def WriteFile(self, path, contents):
- with open(path, 'w', encoding='utf-8') as f:
- f.write(contents)
+ """Utilities for operations on files."""
+
+ _instance = None
+ DRY_RUN = False
+
+ @classmethod
+ def Configure(cls, dry_run):
+ cls.DRY_RUN = dry_run
+
+ def __new__(cls, *args, **kwargs):
+ if not cls._instance:
+ if cls.DRY_RUN:
+ cls._instance = super(FileUtils, cls).__new__(
+ MockFileUtils, *args, **kwargs
+ )
+ else:
+ cls._instance = super(FileUtils, cls).__new__(
+ cls, *args, **kwargs
+ )
+ return cls._instance
+
+ def Md5File(self, filename, log_level="verbose", _block_size=2 ** 10):
+ command = "md5sum %s" % filename
+ ce = command_executer.GetCommandExecuter(log_level=log_level)
+ ret, out, _ = ce.RunCommandWOutput(command)
+ if ret:
+ raise RuntimeError("Could not run md5sum on: %s" % filename)
+
+ return out.strip().split()[0]
+
+ def CanonicalizeChromeOSRoot(self, chromeos_root):
+ chromeos_root = os.path.expanduser(chromeos_root)
+ if os.path.isdir(os.path.join(chromeos_root, "chromite")):
+ return chromeos_root
+ else:
+ return None
+
+ def ChromeOSRootFromImage(self, chromeos_image):
+ chromeos_root = os.path.join(
+ os.path.dirname(chromeos_image), "../../../../.."
+ )
+ return self.CanonicalizeChromeOSRoot(chromeos_root)
+
+ def MkDirP(self, path):
+ try:
+ os.makedirs(path)
+ except OSError as exc:
+ if exc.errno == errno.EEXIST:
+ pass
+ else:
+ raise
+
+ def RmDir(self, path):
+ shutil.rmtree(path, ignore_errors=True)
+
+ def WriteFile(self, path, contents):
+ with open(path, "w", encoding="utf-8") as f:
+ f.write(contents)
class MockFileUtils(FileUtils):
- """Mock class for file utilities."""
+ """Mock class for file utilities."""
- def Md5File(self, filename, log_level='verbose', _block_size=2**10):
- return 'd41d8cd98f00b204e9800998ecf8427e'
+ def Md5File(self, filename, log_level="verbose", _block_size=2 ** 10):
+ return "d41d8cd98f00b204e9800998ecf8427e"
- def CanonicalizeChromeOSRoot(self, chromeos_root):
- return '/tmp/chromeos_root'
+ def CanonicalizeChromeOSRoot(self, chromeos_root):
+ return "/tmp/chromeos_root"
- def ChromeOSRootFromImage(self, chromeos_image):
- return '/tmp/chromeos_root'
+ def ChromeOSRootFromImage(self, chromeos_image):
+ return "/tmp/chromeos_root"
- def RmDir(self, path):
- pass
+ def RmDir(self, path):
+ pass
- def MkDirP(self, path):
- pass
+ def MkDirP(self, path):
+ pass
- def WriteFile(self, path, contents):
- pass
+ def WriteFile(self, path, contents):
+ pass
diff --git a/cros_utils/html_tools.py b/cros_utils/html_tools.py
index 688955ff..202bef05 100644
--- a/cros_utils/html_tools.py
+++ b/cros_utils/html_tools.py
@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
-# Copyright 2019 The Chromium OS Authors. All rights reserved.
+# Copyright 2019 The ChromiumOS Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
@@ -7,7 +7,8 @@
def GetPageHeader(page_title):
- return """<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"
+ return (
+ """<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"
"http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
<html>
<head>
@@ -32,64 +33,68 @@ function displayRow(id){
</head>
<body>
-""" % page_title
+"""
+ % page_title
+ )
def GetListHeader():
- return '<ul>'
+ return "<ul>"
def GetListItem(text):
- return '<li>%s</li>' % text
+ return "<li>%s</li>" % text
def GetListFooter():
- return '</ul>'
+ return "</ul>"
def GetList(items):
- return '<ul>%s</ul>' % ''.join(['<li>%s</li>' % item for item in items])
+ return "<ul>%s</ul>" % "".join(["<li>%s</li>" % item for item in items])
def GetParagraph(text):
- return '<p>%s</p>' % text
+ return "<p>%s</p>" % text
def GetFooter():
- return '</body>\n</html>'
+ return "</body>\n</html>"
def GetHeader(text, h=1):
- return '<h%s>%s</h%s>' % (h, text, h)
+ return "<h%s>%s</h%s>" % (h, text, h)
def GetTableHeader(headers):
- row = ''.join(['<th>%s</th>' % header for header in headers])
- return '<table><tr>%s</tr>' % row
+ row = "".join(["<th>%s</th>" % header for header in headers])
+ return "<table><tr>%s</tr>" % row
def GetTableFooter():
- return '</table>'
+ return "</table>"
def FormatLineBreaks(text):
- return text.replace('\n', '<br/>')
+ return text.replace("\n", "<br/>")
def GetTableCell(text):
- return '<td>%s</td>' % FormatLineBreaks(str(text))
+ return "<td>%s</td>" % FormatLineBreaks(str(text))
def GetTableRow(columns):
- return '<tr>%s</tr>' % '\n'.join([GetTableCell(column) for column in columns])
+ return "<tr>%s</tr>" % "\n".join(
+ [GetTableCell(column) for column in columns]
+ )
def GetTable(headers, rows):
- table = [GetTableHeader(headers)]
- table.extend([GetTableRow(row) for row in rows])
- table.append(GetTableFooter())
- return '\n'.join(table)
+ table = [GetTableHeader(headers)]
+ table.extend([GetTableRow(row) for row in rows])
+ table.append(GetTableFooter())
+ return "\n".join(table)
def GetLink(link, text):
- return "<a href='%s'>%s</a>" % (link, text)
+ return "<a href='%s'>%s</a>" % (link, text)
diff --git a/cros_utils/locks.py b/cros_utils/locks.py
index 848e23fc..db6f4343 100644
--- a/cros_utils/locks.py
+++ b/cros_utils/locks.py
@@ -1,49 +1,52 @@
# -*- coding: utf-8 -*-
#
-# Copyright 2019 The Chromium OS Authors. All rights reserved.
+# Copyright 2019 The ChromiumOS Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Utilities for locking machines."""
-from __future__ import print_function
import time
-import lock_machine
-
from cros_utils import logger
+import lock_machine
def AcquireLock(machines, chromeos_root, timeout=1200):
- """Acquire lock for machine(s) with timeout."""
- start_time = time.time()
- locked = True
- sleep_time = min(10, timeout / 10.0)
- while True:
- try:
- lock_machine.LockManager(machines, False,
- chromeos_root).UpdateMachines(True)
- break
- except Exception as e:
- if time.time() - start_time > timeout:
- locked = False
- logger.GetLogger().LogWarning(
- 'Could not acquire lock on {0} within {1} seconds: {2}'.format(
- repr(machines), timeout, str(e)))
- break
- time.sleep(sleep_time)
- return locked
+ """Acquire lock for machine(s) with timeout."""
+ start_time = time.time()
+ locked = True
+ sleep_time = min(10, timeout / 10.0)
+ while True:
+ try:
+ lock_machine.LockManager(
+ machines, False, chromeos_root
+ ).UpdateMachines(True)
+ break
+ except Exception as e:
+ if time.time() - start_time > timeout:
+ locked = False
+ logger.GetLogger().LogWarning(
+ "Could not acquire lock on {0} within {1} seconds: {2}".format(
+ repr(machines), timeout, str(e)
+ )
+ )
+ break
+ time.sleep(sleep_time)
+ return locked
def ReleaseLock(machines, chromeos_root):
- """Release locked machine(s)."""
- unlocked = True
- try:
- lock_machine.LockManager(machines, False,
- chromeos_root).UpdateMachines(False)
- except Exception as e:
- unlocked = False
- logger.GetLogger().LogWarning(
- 'Could not unlock %s. %s' % (repr(machines), str(e)))
- return unlocked
+ """Release locked machine(s)."""
+ unlocked = True
+ try:
+ lock_machine.LockManager(machines, False, chromeos_root).UpdateMachines(
+ False
+ )
+ except Exception as e:
+ unlocked = False
+ logger.GetLogger().LogWarning(
+ "Could not unlock %s. %s" % (repr(machines), str(e))
+ )
+ return unlocked
diff --git a/cros_utils/logger.py b/cros_utils/logger.py
index e304fe12..e9b9d1ba 100644
--- a/cros_utils/logger.py
+++ b/cros_utils/logger.py
@@ -1,11 +1,10 @@
# -*- coding: utf-8 -*-
-# Copyright 2019 The Chromium OS Authors. All rights reserved.
+# Copyright 2019 The ChromiumOS Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Logging helper module."""
-from __future__ import print_function
# System modules
import os.path
@@ -15,350 +14,380 @@ import traceback
# TODO(yunlian@google.com): Use GetRoot from misc
def GetRoot(scr_name):
- """Break up pathname into (dir+name)."""
- abs_path = os.path.abspath(scr_name)
- return (os.path.dirname(abs_path), os.path.basename(abs_path))
+ """Break up pathname into (dir+name)."""
+ abs_path = os.path.abspath(scr_name)
+ return (os.path.dirname(abs_path), os.path.basename(abs_path))
class Logger(object):
- """Logging helper class."""
-
- MAX_LOG_FILES = 10
-
- def __init__(self, rootdir, basefilename, print_console, subdir='logs'):
- logdir = os.path.join(rootdir, subdir)
- basename = os.path.join(logdir, basefilename)
-
- try:
- os.makedirs(logdir)
- except OSError:
- pass
- # print("Warning: Logs directory '%s' already exists." % logdir)
-
- self.print_console = print_console
-
- self._CreateLogFileHandles(basename)
-
- self._WriteTo(self.cmdfd, ' '.join(sys.argv), True)
-
- def _AddSuffix(self, basename, suffix):
- return '%s%s' % (basename, suffix)
-
- def _FindSuffix(self, basename):
- timestamps = []
- found_suffix = None
- for i in range(self.MAX_LOG_FILES):
- suffix = str(i)
- suffixed_basename = self._AddSuffix(basename, suffix)
- cmd_file = '%s.cmd' % suffixed_basename
- if not os.path.exists(cmd_file):
- found_suffix = suffix
- break
- timestamps.append(os.stat(cmd_file).st_mtime)
-
- if found_suffix:
- return found_suffix
-
- # Try to pick the oldest file with the suffix and return that one.
- suffix = str(timestamps.index(min(timestamps)))
- # print ("Warning: Overwriting log file: %s" %
- # self._AddSuffix(basename, suffix))
- return suffix
-
- def _CreateLogFileHandle(self, name):
- fd = None
- try:
- fd = open(name, 'w')
- except IOError:
- print('Warning: could not open %s for writing.' % name)
- return fd
-
- def _CreateLogFileHandles(self, basename):
- suffix = self._FindSuffix(basename)
- suffixed_basename = self._AddSuffix(basename, suffix)
-
- self.cmdfd = self._CreateLogFileHandle('%s.cmd' % suffixed_basename)
- self.stdout = self._CreateLogFileHandle('%s.out' % suffixed_basename)
- self.stderr = self._CreateLogFileHandle('%s.err' % suffixed_basename)
-
- self._CreateLogFileSymlinks(basename, suffixed_basename)
-
- # Symlink unsuffixed basename to currently suffixed one.
- def _CreateLogFileSymlinks(self, basename, suffixed_basename):
- try:
- for extension in ['cmd', 'out', 'err']:
- src_file = '%s.%s' % (os.path.basename(suffixed_basename), extension)
- dest_file = '%s.%s' % (basename, extension)
- if os.path.exists(dest_file):
- os.remove(dest_file)
- os.symlink(src_file, dest_file)
- except Exception as ex:
- print('Exception while creating symlinks: %s' % str(ex))
-
- def _WriteTo(self, fd, msg, flush):
- if fd:
- fd.write(msg)
- if flush:
- fd.flush()
-
- def LogStartDots(self, print_to_console=True):
- term_fd = self._GetStdout(print_to_console)
- if term_fd:
- term_fd.flush()
- term_fd.write('. ')
- term_fd.flush()
-
- def LogAppendDot(self, print_to_console=True):
- term_fd = self._GetStdout(print_to_console)
- if term_fd:
- term_fd.write('. ')
- term_fd.flush()
-
- def LogEndDots(self, print_to_console=True):
- term_fd = self._GetStdout(print_to_console)
- if term_fd:
- term_fd.write('\n')
- term_fd.flush()
-
- def LogMsg(self, file_fd, term_fd, msg, flush=True):
- if file_fd:
- self._WriteTo(file_fd, msg, flush)
- if self.print_console:
- self._WriteTo(term_fd, msg, flush)
-
- def _GetStdout(self, print_to_console):
- if print_to_console:
- return sys.stdout
- return None
-
- def _GetStderr(self, print_to_console):
- if print_to_console:
- return sys.stderr
- return None
-
- def LogCmdToFileOnly(self, cmd, machine='', user=None):
- if not self.cmdfd:
- return
-
- host = ('%s@%s' % (user, machine)) if user else machine
- flush = True
- cmd_string = 'CMD (%s): %s\n' % (host, cmd)
- self._WriteTo(self.cmdfd, cmd_string, flush)
-
- def LogCmd(self, cmd, machine='', user=None, print_to_console=True):
- if user:
- host = '%s@%s' % (user, machine)
- else:
- host = machine
-
- self.LogMsg(self.cmdfd, self._GetStdout(print_to_console),
- 'CMD (%s): %s\n' % (host, cmd))
-
- def LogFatal(self, msg, print_to_console=True):
- self.LogMsg(self.stderr, self._GetStderr(print_to_console),
- 'FATAL: %s\n' % msg)
- self.LogMsg(self.stderr, self._GetStderr(print_to_console),
- '\n'.join(traceback.format_stack()))
- sys.exit(1)
-
- def LogError(self, msg, print_to_console=True):
- self.LogMsg(self.stderr, self._GetStderr(print_to_console),
- 'ERROR: %s\n' % msg)
-
- def LogWarning(self, msg, print_to_console=True):
- self.LogMsg(self.stderr, self._GetStderr(print_to_console),
- 'WARNING: %s\n' % msg)
-
- def LogOutput(self, msg, print_to_console=True):
- self.LogMsg(self.stdout, self._GetStdout(print_to_console),
- 'OUTPUT: %s\n' % msg)
-
- def LogFatalIf(self, condition, msg):
- if condition:
- self.LogFatal(msg)
-
- def LogErrorIf(self, condition, msg):
- if condition:
- self.LogError(msg)
-
- def LogWarningIf(self, condition, msg):
- if condition:
- self.LogWarning(msg)
-
- def LogCommandOutput(self, msg, print_to_console=True):
- self.LogMsg(
- self.stdout, self._GetStdout(print_to_console), msg, flush=False)
-
- def LogCommandError(self, msg, print_to_console=True):
- self.LogMsg(
- self.stderr, self._GetStderr(print_to_console), msg, flush=False)
-
- def Flush(self):
- self.cmdfd.flush()
- self.stdout.flush()
- self.stderr.flush()
+ """Logging helper class."""
+
+ MAX_LOG_FILES = 10
+
+ def __init__(self, rootdir, basefilename, print_console, subdir="logs"):
+ logdir = os.path.join(rootdir, subdir)
+ basename = os.path.join(logdir, basefilename)
+
+ try:
+ os.makedirs(logdir)
+ except OSError:
+ pass
+ # print("Warning: Logs directory '%s' already exists." % logdir)
+
+ self.print_console = print_console
+
+ self._CreateLogFileHandles(basename)
+
+ self._WriteTo(self.cmdfd, " ".join(sys.argv), True)
+
+ def _AddSuffix(self, basename, suffix):
+ return "%s%s" % (basename, suffix)
+
+ def _FindSuffix(self, basename):
+ timestamps = []
+ found_suffix = None
+ for i in range(self.MAX_LOG_FILES):
+ suffix = str(i)
+ suffixed_basename = self._AddSuffix(basename, suffix)
+ cmd_file = "%s.cmd" % suffixed_basename
+ if not os.path.exists(cmd_file):
+ found_suffix = suffix
+ break
+ timestamps.append(os.stat(cmd_file).st_mtime)
+
+ if found_suffix:
+ return found_suffix
+
+ # Try to pick the oldest file with the suffix and return that one.
+ suffix = str(timestamps.index(min(timestamps)))
+ # print ("Warning: Overwriting log file: %s" %
+ # self._AddSuffix(basename, suffix))
+ return suffix
+
+ def _CreateLogFileHandle(self, name):
+ fd = None
+ try:
+ fd = open(name, "w")
+ except IOError:
+ print("Warning: could not open %s for writing." % name)
+ return fd
+
+ def _CreateLogFileHandles(self, basename):
+ suffix = self._FindSuffix(basename)
+ suffixed_basename = self._AddSuffix(basename, suffix)
+
+ self.cmdfd = self._CreateLogFileHandle("%s.cmd" % suffixed_basename)
+ self.stdout = self._CreateLogFileHandle("%s.out" % suffixed_basename)
+ self.stderr = self._CreateLogFileHandle("%s.err" % suffixed_basename)
+
+ self._CreateLogFileSymlinks(basename, suffixed_basename)
+
+ # Symlink unsuffixed basename to currently suffixed one.
+ def _CreateLogFileSymlinks(self, basename, suffixed_basename):
+ try:
+ for extension in ["cmd", "out", "err"]:
+ src_file = "%s.%s" % (
+ os.path.basename(suffixed_basename),
+ extension,
+ )
+ dest_file = "%s.%s" % (basename, extension)
+ if os.path.exists(dest_file):
+ os.remove(dest_file)
+ os.symlink(src_file, dest_file)
+ except Exception as ex:
+ print("Exception while creating symlinks: %s" % str(ex))
+
+ def _WriteTo(self, fd, msg, flush):
+ if fd:
+ fd.write(msg)
+ if flush:
+ fd.flush()
+
+ def LogStartDots(self, print_to_console=True):
+ term_fd = self._GetStdout(print_to_console)
+ if term_fd:
+ term_fd.flush()
+ term_fd.write(". ")
+ term_fd.flush()
+
+ def LogAppendDot(self, print_to_console=True):
+ term_fd = self._GetStdout(print_to_console)
+ if term_fd:
+ term_fd.write(". ")
+ term_fd.flush()
+
+ def LogEndDots(self, print_to_console=True):
+ term_fd = self._GetStdout(print_to_console)
+ if term_fd:
+ term_fd.write("\n")
+ term_fd.flush()
+
+ def LogMsg(self, file_fd, term_fd, msg, flush=True):
+ if file_fd:
+ self._WriteTo(file_fd, msg, flush)
+ if self.print_console:
+ self._WriteTo(term_fd, msg, flush)
+
+ def _GetStdout(self, print_to_console):
+ if print_to_console:
+ return sys.stdout
+ return None
+
+ def _GetStderr(self, print_to_console):
+ if print_to_console:
+ return sys.stderr
+ return None
+
+ def LogCmdToFileOnly(self, cmd, machine="", user=None):
+ if not self.cmdfd:
+ return
+
+ host = ("%s@%s" % (user, machine)) if user else machine
+ flush = True
+ cmd_string = "CMD (%s): %s\n" % (host, cmd)
+ self._WriteTo(self.cmdfd, cmd_string, flush)
+
+ def LogCmd(self, cmd, machine="", user=None, print_to_console=True):
+ if user:
+ host = "%s@%s" % (user, machine)
+ else:
+ host = machine
+
+ self.LogMsg(
+ self.cmdfd,
+ self._GetStdout(print_to_console),
+ "CMD (%s): %s\n" % (host, cmd),
+ )
+
+ def LogFatal(self, msg, print_to_console=True):
+ self.LogMsg(
+ self.stderr, self._GetStderr(print_to_console), "FATAL: %s\n" % msg
+ )
+ self.LogMsg(
+ self.stderr,
+ self._GetStderr(print_to_console),
+ "\n".join(traceback.format_stack()),
+ )
+ sys.exit(1)
+
+ def LogError(self, msg, print_to_console=True):
+ self.LogMsg(
+ self.stderr, self._GetStderr(print_to_console), "ERROR: %s\n" % msg
+ )
+
+ def LogWarning(self, msg, print_to_console=True):
+ self.LogMsg(
+ self.stderr,
+ self._GetStderr(print_to_console),
+ "WARNING: %s\n" % msg,
+ )
+
+ def LogOutput(self, msg, print_to_console=True):
+ self.LogMsg(
+ self.stdout, self._GetStdout(print_to_console), "OUTPUT: %s\n" % msg
+ )
+
+ def LogFatalIf(self, condition, msg):
+ if condition:
+ self.LogFatal(msg)
+
+ def LogErrorIf(self, condition, msg):
+ if condition:
+ self.LogError(msg)
+
+ def LogWarningIf(self, condition, msg):
+ if condition:
+ self.LogWarning(msg)
+
+ def LogCommandOutput(self, msg, print_to_console=True):
+ self.LogMsg(
+ self.stdout, self._GetStdout(print_to_console), msg, flush=False
+ )
+
+ def LogCommandError(self, msg, print_to_console=True):
+ self.LogMsg(
+ self.stderr, self._GetStderr(print_to_console), msg, flush=False
+ )
+
+ def Flush(self):
+ self.cmdfd.flush()
+ self.stdout.flush()
+ self.stderr.flush()
class MockLogger(object):
- """Logging helper class."""
-
- MAX_LOG_FILES = 10
-
- def __init__(self, *_args, **_kwargs):
- self.stdout = sys.stdout
- self.stderr = sys.stderr
-
- def _AddSuffix(self, basename, suffix):
- return '%s%s' % (basename, suffix)
-
- def _FindSuffix(self, basename):
- timestamps = []
- found_suffix = None
- for i in range(self.MAX_LOG_FILES):
- suffix = str(i)
- suffixed_basename = self._AddSuffix(basename, suffix)
- cmd_file = '%s.cmd' % suffixed_basename
- if not os.path.exists(cmd_file):
- found_suffix = suffix
- break
- timestamps.append(os.stat(cmd_file).st_mtime)
-
- if found_suffix:
- return found_suffix
-
- # Try to pick the oldest file with the suffix and return that one.
- suffix = str(timestamps.index(min(timestamps)))
- # print ("Warning: Overwriting log file: %s" %
- # self._AddSuffix(basename, suffix))
- return suffix
-
- def _CreateLogFileHandle(self, name):
- print('MockLogger: creating open file handle for %s (writing)' % name)
-
- def _CreateLogFileHandles(self, basename):
- suffix = self._FindSuffix(basename)
- suffixed_basename = self._AddSuffix(basename, suffix)
-
- print('MockLogger: opening file %s.cmd' % suffixed_basename)
- print('MockLogger: opening file %s.out' % suffixed_basename)
- print('MockLogger: opening file %s.err' % suffixed_basename)
+ """Logging helper class."""
+
+ MAX_LOG_FILES = 10
+
+ def __init__(self, *_args, **_kwargs):
+ self.stdout = sys.stdout
+ self.stderr = sys.stderr
+
+ def _AddSuffix(self, basename, suffix):
+ return "%s%s" % (basename, suffix)
+
+ def _FindSuffix(self, basename):
+ timestamps = []
+ found_suffix = None
+ for i in range(self.MAX_LOG_FILES):
+ suffix = str(i)
+ suffixed_basename = self._AddSuffix(basename, suffix)
+ cmd_file = "%s.cmd" % suffixed_basename
+ if not os.path.exists(cmd_file):
+ found_suffix = suffix
+ break
+ timestamps.append(os.stat(cmd_file).st_mtime)
+
+ if found_suffix:
+ return found_suffix
+
+ # Try to pick the oldest file with the suffix and return that one.
+ suffix = str(timestamps.index(min(timestamps)))
+ # print ("Warning: Overwriting log file: %s" %
+ # self._AddSuffix(basename, suffix))
+ return suffix
+
+ def _CreateLogFileHandle(self, name):
+ print("MockLogger: creating open file handle for %s (writing)" % name)
+
+ def _CreateLogFileHandles(self, basename):
+ suffix = self._FindSuffix(basename)
+ suffixed_basename = self._AddSuffix(basename, suffix)
+
+ print("MockLogger: opening file %s.cmd" % suffixed_basename)
+ print("MockLogger: opening file %s.out" % suffixed_basename)
+ print("MockLogger: opening file %s.err" % suffixed_basename)
+
+ self._CreateLogFileSymlinks(basename, suffixed_basename)
+
+ # Symlink unsuffixed basename to currently suffixed one.
+ def _CreateLogFileSymlinks(self, basename, suffixed_basename):
+ for extension in ["cmd", "out", "err"]:
+ src_file = "%s.%s" % (
+ os.path.basename(suffixed_basename),
+ extension,
+ )
+ dest_file = "%s.%s" % (basename, extension)
+ print(
+ "MockLogger: Calling os.symlink(%s, %s)" % (src_file, dest_file)
+ )
+
+ def _WriteTo(self, _fd, msg, _flush):
+ print("MockLogger: %s" % msg)
+
+ def LogStartDots(self, _print_to_console=True):
+ print(". ")
+
+ def LogAppendDot(self, _print_to_console=True):
+ print(". ")
+
+ def LogEndDots(self, _print_to_console=True):
+ print("\n")
+
+ def LogMsg(self, _file_fd, _term_fd, msg, **_kwargs):
+ print("MockLogger: %s" % msg)
+
+ def _GetStdout(self, _print_to_console):
+ return None
+
+ def _GetStderr(self, _print_to_console):
+ return None
+
+ def LogCmdToFileOnly(self, *_args, **_kwargs):
+ return
+
+ # def LogCmdToFileOnly(self, cmd, machine='', user=None):
+ # host = ('%s@%s' % (user, machine)) if user else machine
+ # cmd_string = 'CMD (%s): %s\n' % (host, cmd)
+ # print('MockLogger: Writing to file ONLY: %s' % cmd_string)
+
+ def LogCmd(self, cmd, machine="", user=None, print_to_console=True):
+ if user:
+ host = "%s@%s" % (user, machine)
+ else:
+ host = machine
+
+ self.LogMsg(
+ 0, self._GetStdout(print_to_console), "CMD (%s): %s\n" % (host, cmd)
+ )
+
+ def LogFatal(self, msg, print_to_console=True):
+ self.LogMsg(0, self._GetStderr(print_to_console), "FATAL: %s\n" % msg)
+ self.LogMsg(
+ 0,
+ self._GetStderr(print_to_console),
+ "\n".join(traceback.format_stack()),
+ )
+ print("MockLogger: Calling sysexit(1)")
+
+ def LogError(self, msg, print_to_console=True):
+ self.LogMsg(0, self._GetStderr(print_to_console), "ERROR: %s\n" % msg)
+
+ def LogWarning(self, msg, print_to_console=True):
+ self.LogMsg(0, self._GetStderr(print_to_console), "WARNING: %s\n" % msg)
+
+ def LogOutput(self, msg, print_to_console=True):
+ self.LogMsg(0, self._GetStdout(print_to_console), "OUTPUT: %s\n" % msg)
+
+ def LogFatalIf(self, condition, msg):
+ if condition:
+ self.LogFatal(msg)
- self._CreateLogFileSymlinks(basename, suffixed_basename)
+ def LogErrorIf(self, condition, msg):
+ if condition:
+ self.LogError(msg)
- # Symlink unsuffixed basename to currently suffixed one.
- def _CreateLogFileSymlinks(self, basename, suffixed_basename):
- for extension in ['cmd', 'out', 'err']:
- src_file = '%s.%s' % (os.path.basename(suffixed_basename), extension)
- dest_file = '%s.%s' % (basename, extension)
- print('MockLogger: Calling os.symlink(%s, %s)' % (src_file, dest_file))
+ def LogWarningIf(self, condition, msg):
+ if condition:
+ self.LogWarning(msg)
- def _WriteTo(self, _fd, msg, _flush):
- print('MockLogger: %s' % msg)
+ def LogCommandOutput(self, msg, print_to_console=True):
+ self.LogMsg(
+ self.stdout, self._GetStdout(print_to_console), msg, flush=False
+ )
- def LogStartDots(self, _print_to_console=True):
- print('. ')
+ def LogCommandError(self, msg, print_to_console=True):
+ self.LogMsg(
+ self.stderr, self._GetStderr(print_to_console), msg, flush=False
+ )
- def LogAppendDot(self, _print_to_console=True):
- print('. ')
-
- def LogEndDots(self, _print_to_console=True):
- print('\n')
-
- def LogMsg(self, _file_fd, _term_fd, msg, **_kwargs):
- print('MockLogger: %s' % msg)
-
- def _GetStdout(self, _print_to_console):
- return None
-
- def _GetStderr(self, _print_to_console):
- return None
-
- def LogCmdToFileOnly(self, *_args, **_kwargs):
- return
-
- # def LogCmdToFileOnly(self, cmd, machine='', user=None):
- # host = ('%s@%s' % (user, machine)) if user else machine
- # cmd_string = 'CMD (%s): %s\n' % (host, cmd)
- # print('MockLogger: Writing to file ONLY: %s' % cmd_string)
-
- def LogCmd(self, cmd, machine='', user=None, print_to_console=True):
- if user:
- host = '%s@%s' % (user, machine)
- else:
- host = machine
-
- self.LogMsg(0, self._GetStdout(print_to_console),
- 'CMD (%s): %s\n' % (host, cmd))
-
- def LogFatal(self, msg, print_to_console=True):
- self.LogMsg(0, self._GetStderr(print_to_console), 'FATAL: %s\n' % msg)
- self.LogMsg(0, self._GetStderr(print_to_console),
- '\n'.join(traceback.format_stack()))
- print('MockLogger: Calling sysexit(1)')
-
- def LogError(self, msg, print_to_console=True):
- self.LogMsg(0, self._GetStderr(print_to_console), 'ERROR: %s\n' % msg)
-
- def LogWarning(self, msg, print_to_console=True):
- self.LogMsg(0, self._GetStderr(print_to_console), 'WARNING: %s\n' % msg)
-
- def LogOutput(self, msg, print_to_console=True):
- self.LogMsg(0, self._GetStdout(print_to_console), 'OUTPUT: %s\n' % msg)
-
- def LogFatalIf(self, condition, msg):
- if condition:
- self.LogFatal(msg)
-
- def LogErrorIf(self, condition, msg):
- if condition:
- self.LogError(msg)
-
- def LogWarningIf(self, condition, msg):
- if condition:
- self.LogWarning(msg)
-
- def LogCommandOutput(self, msg, print_to_console=True):
- self.LogMsg(
- self.stdout, self._GetStdout(print_to_console), msg, flush=False)
-
- def LogCommandError(self, msg, print_to_console=True):
- self.LogMsg(
- self.stderr, self._GetStderr(print_to_console), msg, flush=False)
-
- def Flush(self):
- print('MockLogger: Flushing cmdfd, stdout, stderr')
+ def Flush(self):
+ print("MockLogger: Flushing cmdfd, stdout, stderr")
main_logger = None
def InitLogger(script_name, log_dir, print_console=True, mock=False):
- """Initialize a global logger. To be called only once."""
- # pylint: disable=global-statement
- global main_logger
- assert not main_logger, 'The logger has already been initialized'
- rootdir, basefilename = GetRoot(script_name)
- if not log_dir:
- log_dir = rootdir
- if not mock:
- main_logger = Logger(log_dir, basefilename, print_console)
- else:
- main_logger = MockLogger(log_dir, basefilename, print_console)
+ """Initialize a global logger. To be called only once."""
+ # pylint: disable=global-statement
+ global main_logger
+ assert not main_logger, "The logger has already been initialized"
+ rootdir, basefilename = GetRoot(script_name)
+ if not log_dir:
+ log_dir = rootdir
+ if not mock:
+ main_logger = Logger(log_dir, basefilename, print_console)
+ else:
+ main_logger = MockLogger(log_dir, basefilename, print_console)
-def GetLogger(log_dir='', mock=False):
- if not main_logger:
- InitLogger(sys.argv[0], log_dir, mock=mock)
- return main_logger
+def GetLogger(log_dir="", mock=False):
+ if not main_logger:
+ InitLogger(sys.argv[0], log_dir, mock=mock)
+ return main_logger
def HandleUncaughtExceptions(fun):
- """Catches all exceptions that would go outside decorated fun scope."""
+ """Catches all exceptions that would go outside decorated fun scope."""
- def _Interceptor(*args, **kwargs):
- try:
- return fun(*args, **kwargs)
- except Exception:
- GetLogger().LogFatal('Uncaught exception:\n%s' % traceback.format_exc())
+ def _Interceptor(*args, **kwargs):
+ try:
+ return fun(*args, **kwargs)
+ except Exception:
+ GetLogger().LogFatal(
+ "Uncaught exception:\n%s" % traceback.format_exc()
+ )
- return _Interceptor
+ return _Interceptor
diff --git a/cros_utils/machines.py b/cros_utils/machines.py
index 89b51b01..a5385731 100644
--- a/cros_utils/machines.py
+++ b/cros_utils/machines.py
@@ -1,27 +1,26 @@
# -*- coding: utf-8 -*-
-# Copyright 2015 The Chromium OS Authors. All rights reserved.
+# Copyright 2015 The ChromiumOS Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Utilities relating to machine-specific functions."""
-from __future__ import print_function
from cros_utils import command_executer
-def MachineIsPingable(machine, logging_level='average'):
- """Checks to see if a machine is responding to 'ping'.
+def MachineIsPingable(machine, logging_level="average"):
+ """Checks to see if a machine is responding to 'ping'.
- Args:
- machine: String containing the name or ip address of the machine to check.
- logging_level: The logging level with which to initialize the
- command_executer (from command_executor.LOG_LEVEL enum list).
+ Args:
+ machine: String containing the name or ip address of the machine to check.
+      logging_level: The logging level with which to initialize the
+        command_executer (from command_executer.LOG_LEVEL enum list).
- Returns:
- Boolean indicating whether machine is responding to ping or not.
- """
- ce = command_executer.GetCommandExecuter(log_level=logging_level)
- cmd = 'ping -c 1 -w 3 %s' % machine
- status = ce.RunCommand(cmd)
- return status == 0
+ Returns:
+ Boolean indicating whether machine is responding to ping or not.
+ """
+ ce = command_executer.GetCommandExecuter(log_level=logging_level)
+ cmd = "ping -c 1 -w 3 %s" % machine
+ status = ce.RunCommand(cmd)
+ return status == 0
diff --git a/cros_utils/misc.py b/cros_utils/misc.py
index a0d0de73..aabb5ad7 100644
--- a/cros_utils/misc.py
+++ b/cros_utils/misc.py
@@ -1,14 +1,12 @@
# -*- coding: utf-8 -*-
-# Copyright 2013 The Chromium OS Authors. All rights reserved.
+# Copyright 2013 The ChromiumOS Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Utilities for toolchain build."""
-from __future__ import division
-from __future__ import print_function
-__author__ = 'asharif@google.com (Ahmad Sharif)'
+__author__ = "asharif@google.com (Ahmad Sharif)"
from contextlib import contextmanager
import os
@@ -19,495 +17,545 @@ import sys
from cros_utils import command_executer
from cros_utils import logger
-CHROMEOS_SCRIPTS_DIR = '/mnt/host/source/src/scripts'
-TOOLCHAIN_UTILS_PATH = ('/mnt/host/source/src/third_party/toolchain-utils/'
- 'cros_utils/toolchain_utils.sh')
+
+CHROMEOS_SCRIPTS_DIR = "/mnt/host/source/src/scripts"
+TOOLCHAIN_UTILS_PATH = (
+ "/mnt/host/source/src/third_party/toolchain-utils/"
+ "cros_utils/toolchain_utils.sh"
+)
def GetChromeOSVersionFromLSBVersion(lsb_version):
- """Get Chromeos version from Lsb version."""
- ce = command_executer.GetCommandExecuter()
- command = ('git ls-remote '
- 'https://chromium.googlesource.com/chromiumos/manifest.git '
- 'refs/heads/release-R*')
- ret, out, _ = ce.RunCommandWOutput(command, print_to_console=False)
- assert ret == 0, 'Command %s failed' % command
- lower = []
- for line in out.splitlines():
- mo = re.search(r'refs/heads/release-R(\d+)-(\d+)\.B', line)
- if mo:
- revision = int(mo.group(1))
- build = int(mo.group(2))
- lsb_build = int(lsb_version.split('.')[0])
- if lsb_build > build:
- lower.append(revision)
- lower = sorted(lower)
- if lower:
- return 'R%d-%s' % (lower[-1] + 1, lsb_version)
- else:
- return 'Unknown'
+    """Get the ChromeOS version from an LSB version string."""
+ ce = command_executer.GetCommandExecuter()
+ command = (
+ "git ls-remote "
+ "https://chromium.googlesource.com/chromiumos/manifest.git "
+ "refs/heads/release-R*"
+ )
+ ret, out, _ = ce.RunCommandWOutput(command, print_to_console=False)
+ assert ret == 0, "Command %s failed" % command
+ lower = []
+ for line in out.splitlines():
+ mo = re.search(r"refs/heads/release-R(\d+)-(\d+)\.B", line)
+ if mo:
+ revision = int(mo.group(1))
+ build = int(mo.group(2))
+ lsb_build = int(lsb_version.split(".")[0])
+ if lsb_build > build:
+ lower.append(revision)
+ lower = sorted(lower)
+ if lower:
+ return "R%d-%s" % (lower[-1] + 1, lsb_version)
+ else:
+ return "Unknown"
def ApplySubs(string, *substitutions):
- for pattern, replacement in substitutions:
- string = re.sub(pattern, replacement, string)
- return string
+ for pattern, replacement in substitutions:
+ string = re.sub(pattern, replacement, string)
+ return string
def UnitToNumber(unit_num, base=1000):
- """Convert a number with unit to float."""
- unit_dict = {'kilo': base, 'mega': base**2, 'giga': base**3}
- unit_num = unit_num.lower()
- mo = re.search(r'(\d*)(.+)?', unit_num)
- number = mo.group(1)
- unit = mo.group(2)
- if not unit:
- return float(number)
- for k, v in unit_dict.items():
- if k.startswith(unit):
- return float(number) * v
- raise RuntimeError('Unit: %s not found in byte: %s!' % (unit, unit_num))
+ """Convert a number with unit to float."""
+ unit_dict = {"kilo": base, "mega": base ** 2, "giga": base ** 3}
+ unit_num = unit_num.lower()
+ mo = re.search(r"(\d*)(.+)?", unit_num)
+ number = mo.group(1)
+ unit = mo.group(2)
+ if not unit:
+ return float(number)
+ for k, v in unit_dict.items():
+ if k.startswith(unit):
+ return float(number) * v
+ raise RuntimeError("Unit: %s not found in byte: %s!" % (unit, unit_num))
def GetFilenameFromString(string):
- return ApplySubs(
- string,
- (r'/', '__'),
- (r'\s', '_'),
- (r'[\\$="?^]', ''),
- )
+ return ApplySubs(
+ string,
+ (r"/", "__"),
+ (r"\s", "_"),
+ (r'[\\$="?^]', ""),
+ )
def GetRoot(scr_name):
- """Break up pathname into (dir+name)."""
- abs_path = os.path.abspath(scr_name)
- return (os.path.dirname(abs_path), os.path.basename(abs_path))
+ """Break up pathname into (dir+name)."""
+ abs_path = os.path.abspath(scr_name)
+ return (os.path.dirname(abs_path), os.path.basename(abs_path))
def GetChromeOSKeyFile(chromeos_root):
- return os.path.join(chromeos_root, 'src', 'scripts', 'mod_for_test_scripts',
- 'ssh_keys', 'testing_rsa')
+ return os.path.join(
+ chromeos_root,
+ "src",
+ "scripts",
+ "mod_for_test_scripts",
+ "ssh_keys",
+ "testing_rsa",
+ )
def GetChrootPath(chromeos_root):
- return os.path.join(chromeos_root, 'chroot')
+ return os.path.join(chromeos_root, "chroot")
def GetInsideChrootPath(chromeos_root, file_path):
- if not file_path.startswith(GetChrootPath(chromeos_root)):
- raise RuntimeError("File: %s doesn't seem to be in the chroot: %s" %
- (file_path, chromeos_root))
- return file_path[len(GetChrootPath(chromeos_root)):]
+ if not file_path.startswith(GetChrootPath(chromeos_root)):
+ raise RuntimeError(
+ "File: %s doesn't seem to be in the chroot: %s"
+ % (file_path, chromeos_root)
+ )
+ return file_path[len(GetChrootPath(chromeos_root)) :]
def GetOutsideChrootPath(chromeos_root, file_path):
- return os.path.join(GetChrootPath(chromeos_root), file_path.lstrip('/'))
+ return os.path.join(GetChrootPath(chromeos_root), file_path.lstrip("/"))
def FormatQuotedCommand(command):
- return ApplySubs(command, ('"', r'\"'))
+ return ApplySubs(command, ('"', r"\""))
def FormatCommands(commands):
- return ApplySubs(str(commands), ('&&', '&&\n'), (';', ';\n'),
- (r'\n+\s*', '\n'))
+ return ApplySubs(
+ str(commands), ("&&", "&&\n"), (";", ";\n"), (r"\n+\s*", "\n")
+ )
def GetImageDir(chromeos_root, board):
- return os.path.join(chromeos_root, 'src', 'build', 'images', board)
+ return os.path.join(chromeos_root, "src", "build", "images", board)
def LabelLatestImage(chromeos_root, board, label, vanilla_path=None):
- image_dir = GetImageDir(chromeos_root, board)
- latest_image_dir = os.path.join(image_dir, 'latest')
- latest_image_dir = os.path.realpath(latest_image_dir)
- latest_image_dir = os.path.basename(latest_image_dir)
- retval = 0
- with WorkingDirectory(image_dir):
- command = 'ln -sf -T %s %s' % (latest_image_dir, label)
- ce = command_executer.GetCommandExecuter()
- retval = ce.RunCommand(command)
- if retval:
- return retval
- if vanilla_path:
- command = 'ln -sf -T %s %s' % (vanilla_path, 'vanilla')
- retval2 = ce.RunCommand(command)
- return retval2
- return retval
+ image_dir = GetImageDir(chromeos_root, board)
+ latest_image_dir = os.path.join(image_dir, "latest")
+ latest_image_dir = os.path.realpath(latest_image_dir)
+ latest_image_dir = os.path.basename(latest_image_dir)
+ retval = 0
+ with WorkingDirectory(image_dir):
+ command = "ln -sf -T %s %s" % (latest_image_dir, label)
+ ce = command_executer.GetCommandExecuter()
+ retval = ce.RunCommand(command)
+ if retval:
+ return retval
+ if vanilla_path:
+ command = "ln -sf -T %s %s" % (vanilla_path, "vanilla")
+ retval2 = ce.RunCommand(command)
+ return retval2
+ return retval
def DoesLabelExist(chromeos_root, board, label):
- image_label = os.path.join(GetImageDir(chromeos_root, board), label)
- return os.path.exists(image_label)
+ image_label = os.path.join(GetImageDir(chromeos_root, board), label)
+ return os.path.exists(image_label)
def GetBuildPackagesCommand(board, usepkg=False, debug=False):
- if usepkg:
- usepkg_flag = '--usepkg'
- else:
- usepkg_flag = '--nousepkg'
- if debug:
- withdebug_flag = '--withdebug'
- else:
- withdebug_flag = '--nowithdebug'
- return ('%s/build_packages %s --withdev --withtest --withautotest '
- '--skip_toolchain_update %s --board=%s '
- '--accept_licenses=@CHROMEOS' %
- (CHROMEOS_SCRIPTS_DIR, usepkg_flag, withdebug_flag, board))
+ if usepkg:
+ usepkg_flag = "--usepkg"
+ else:
+ usepkg_flag = "--nousepkg"
+ if debug:
+ withdebug_flag = "--withdebug"
+ else:
+ withdebug_flag = "--nowithdebug"
+ return (
+ "%s/build_packages %s --withdev --withtest --withautotest "
+ "--skip_toolchain_update %s --board=%s "
+ "--accept_licenses=@CHROMEOS"
+ % (CHROMEOS_SCRIPTS_DIR, usepkg_flag, withdebug_flag, board)
+ )
def GetBuildImageCommand(board, dev=False):
- dev_args = ''
- if dev:
- dev_args = '--noenable_rootfs_verification --disk_layout=2gb-rootfs'
- return ('%s/build_image --board=%s %s test' %
- (CHROMEOS_SCRIPTS_DIR, board, dev_args))
+ dev_args = ""
+ if dev:
+ dev_args = "--noenable_rootfs_verification --disk_layout=2gb-rootfs"
+ return "%s/build_image --board=%s %s test" % (
+ CHROMEOS_SCRIPTS_DIR,
+ board,
+ dev_args,
+ )
def GetSetupBoardCommand(board, usepkg=None, force=None):
- """Get setup_board command."""
- options = []
+ """Get setup_board command."""
+ options = []
- if usepkg:
- options.append('--usepkg')
- else:
- options.append('--nousepkg')
+ if usepkg:
+ options.append("--usepkg")
+ else:
+ options.append("--nousepkg")
- if force:
- options.append('--force')
+ if force:
+ options.append("--force")
- options.append('--accept-licenses=@CHROMEOS')
+ options.append("--accept-licenses=@CHROMEOS")
- return 'setup_board --board=%s %s' % (board, ' '.join(options))
+ return "setup_board --board=%s %s" % (board, " ".join(options))
def CanonicalizePath(path):
- path = os.path.expanduser(path)
- path = os.path.realpath(path)
- return path
+ path = os.path.expanduser(path)
+ path = os.path.realpath(path)
+ return path
def GetCtargetFromBoard(board, chromeos_root):
- """Get Ctarget from board."""
- base_board = board.split('_')[0]
- command = ('source %s; get_ctarget_from_board %s' %
- (TOOLCHAIN_UTILS_PATH, base_board))
- ce = command_executer.GetCommandExecuter()
- ret, out, _ = ce.ChrootRunCommandWOutput(chromeos_root, command)
- if ret != 0:
- raise ValueError('Board %s is invalid!' % board)
- # Remove ANSI escape sequences.
- out = StripANSIEscapeSequences(out)
- return out.strip()
+ """Get Ctarget from board."""
+ base_board = board.split("_")[0]
+ command = "source %s; get_ctarget_from_board %s" % (
+ TOOLCHAIN_UTILS_PATH,
+ base_board,
+ )
+ ce = command_executer.GetCommandExecuter()
+ ret, out, _ = ce.ChrootRunCommandWOutput(chromeos_root, command)
+ if ret != 0:
+ raise ValueError("Board %s is invalid!" % board)
+ # Remove ANSI escape sequences.
+ out = StripANSIEscapeSequences(out)
+ return out.strip()
def GetArchFromBoard(board, chromeos_root):
- """Get Arch from board."""
- base_board = board.split('_')[0]
- command = ('source %s; get_board_arch %s' %
- (TOOLCHAIN_UTILS_PATH, base_board))
- ce = command_executer.GetCommandExecuter()
- ret, out, _ = ce.ChrootRunCommandWOutput(chromeos_root, command)
- if ret != 0:
- raise ValueError('Board %s is invalid!' % board)
- # Remove ANSI escape sequences.
- out = StripANSIEscapeSequences(out)
- return out.strip()
+ """Get Arch from board."""
+ base_board = board.split("_")[0]
+ command = "source %s; get_board_arch %s" % (
+ TOOLCHAIN_UTILS_PATH,
+ base_board,
+ )
+ ce = command_executer.GetCommandExecuter()
+ ret, out, _ = ce.ChrootRunCommandWOutput(chromeos_root, command)
+ if ret != 0:
+ raise ValueError("Board %s is invalid!" % board)
+ # Remove ANSI escape sequences.
+ out = StripANSIEscapeSequences(out)
+ return out.strip()
def GetGccLibsDestForBoard(board, chromeos_root):
- """Get gcc libs destination from board."""
- arch = GetArchFromBoard(board, chromeos_root)
- if arch == 'x86':
- return '/build/%s/usr/lib/gcc/' % board
- if arch == 'amd64':
- return '/build/%s/usr/lib64/gcc/' % board
- if arch == 'arm':
- return '/build/%s/usr/lib/gcc/' % board
- if arch == 'arm64':
- return '/build/%s/usr/lib/gcc/' % board
- raise ValueError('Arch %s is invalid!' % arch)
+ """Get gcc libs destination from board."""
+ arch = GetArchFromBoard(board, chromeos_root)
+ if arch == "x86":
+ return "/build/%s/usr/lib/gcc/" % board
+ if arch == "amd64":
+ return "/build/%s/usr/lib64/gcc/" % board
+ if arch == "arm":
+ return "/build/%s/usr/lib/gcc/" % board
+ if arch == "arm64":
+ return "/build/%s/usr/lib/gcc/" % board
+ raise ValueError("Arch %s is invalid!" % arch)
def StripANSIEscapeSequences(string):
- string = re.sub(r'\x1b\[[0-9]*[a-zA-Z]', '', string)
- return string
+ string = re.sub(r"\x1b\[[0-9]*[a-zA-Z]", "", string)
+ return string
def GetChromeSrcDir():
- return 'var/cache/distfiles/target/chrome-src/src'
+ return "var/cache/distfiles/target/chrome-src/src"
def GetEnvStringFromDict(env_dict):
- return ' '.join(['%s="%s"' % var for var in env_dict.items()])
+ return " ".join(['%s="%s"' % var for var in env_dict.items()])
def MergeEnvStringWithDict(env_string, env_dict, prepend=True):
- """Merge env string with dict."""
- if not env_string.strip():
- return GetEnvStringFromDict(env_dict)
- override_env_list = []
- ce = command_executer.GetCommandExecuter()
- for k, v in env_dict.items():
- v = v.strip('"\'')
- if prepend:
- new_env = '%s="%s $%s"' % (k, v, k)
- else:
- new_env = '%s="$%s %s"' % (k, k, v)
- command = '; '.join([env_string, new_env, 'echo $%s' % k])
- ret, out, _ = ce.RunCommandWOutput(command)
- override_env_list.append('%s=%r' % (k, out.strip()))
- ret = env_string + ' ' + ' '.join(override_env_list)
- return ret.strip()
+ """Merge env string with dict."""
+ if not env_string.strip():
+ return GetEnvStringFromDict(env_dict)
+ override_env_list = []
+ ce = command_executer.GetCommandExecuter()
+ for k, v in env_dict.items():
+ v = v.strip("\"'")
+ if prepend:
+ new_env = '%s="%s $%s"' % (k, v, k)
+ else:
+ new_env = '%s="$%s %s"' % (k, k, v)
+ command = "; ".join([env_string, new_env, "echo $%s" % k])
+ ret, out, _ = ce.RunCommandWOutput(command)
+ override_env_list.append("%s=%r" % (k, out.strip()))
+ ret = env_string + " " + " ".join(override_env_list)
+ return ret.strip()
def GetAllImages(chromeos_root, board):
- ce = command_executer.GetCommandExecuter()
- command = ('find %s/src/build/images/%s -name chromiumos_test_image.bin' %
- (chromeos_root, board))
- ret, out, _ = ce.RunCommandWOutput(command)
- assert ret == 0, 'Could not run command: %s' % command
- return out.splitlines()
+ ce = command_executer.GetCommandExecuter()
+ command = "find %s/src/build/images/%s -name chromiumos_test_image.bin" % (
+ chromeos_root,
+ board,
+ )
+ ret, out, _ = ce.RunCommandWOutput(command)
+ assert ret == 0, "Could not run command: %s" % command
+ return out.splitlines()
def IsFloat(text):
- if text is None:
- return False
- try:
- float(text)
- return True
- except ValueError:
- return False
+ if text is None:
+ return False
+ try:
+ float(text)
+ return True
+ except ValueError:
+ return False
def RemoveChromeBrowserObjectFiles(chromeos_root, board):
- """Remove any object files from all the posible locations."""
- out_dir = os.path.join(
- GetChrootPath(chromeos_root),
- 'var/cache/chromeos-chrome/chrome-src/src/out_%s' % board)
- if os.path.exists(out_dir):
- shutil.rmtree(out_dir)
- logger.GetLogger().LogCmd('rm -rf %s' % out_dir)
- out_dir = os.path.join(
- GetChrootPath(chromeos_root),
- 'var/cache/chromeos-chrome/chrome-src-internal/src/out_%s' % board)
- if os.path.exists(out_dir):
- shutil.rmtree(out_dir)
- logger.GetLogger().LogCmd('rm -rf %s' % out_dir)
+ """Remove any object files from all the posible locations."""
+ out_dir = os.path.join(
+ GetChrootPath(chromeos_root),
+ "var/cache/chromeos-chrome/chrome-src/src/out_%s" % board,
+ )
+ if os.path.exists(out_dir):
+ shutil.rmtree(out_dir)
+ logger.GetLogger().LogCmd("rm -rf %s" % out_dir)
+ out_dir = os.path.join(
+ GetChrootPath(chromeos_root),
+ "var/cache/chromeos-chrome/chrome-src-internal/src/out_%s" % board,
+ )
+ if os.path.exists(out_dir):
+ shutil.rmtree(out_dir)
+ logger.GetLogger().LogCmd("rm -rf %s" % out_dir)
@contextmanager
def WorkingDirectory(new_dir):
- """Get the working directory."""
- old_dir = os.getcwd()
- if old_dir != new_dir:
- msg = 'cd %s' % new_dir
- logger.GetLogger().LogCmd(msg)
- os.chdir(new_dir)
- yield new_dir
- if old_dir != new_dir:
- msg = 'cd %s' % old_dir
- logger.GetLogger().LogCmd(msg)
- os.chdir(old_dir)
+ """Get the working directory."""
+ old_dir = os.getcwd()
+ if old_dir != new_dir:
+ msg = "cd %s" % new_dir
+ logger.GetLogger().LogCmd(msg)
+ os.chdir(new_dir)
+ yield new_dir
+ if old_dir != new_dir:
+ msg = "cd %s" % old_dir
+ logger.GetLogger().LogCmd(msg)
+ os.chdir(old_dir)
def HasGitStagedChanges(git_dir):
- """Return True if git repository has staged changes."""
- command = f'cd {git_dir} && git diff --quiet --cached --exit-code HEAD'
- return command_executer.GetCommandExecuter().RunCommand(
- command, print_to_console=False)
+ """Return True if git repository has staged changes."""
+ command = f"cd {git_dir} && git diff --quiet --cached --exit-code HEAD"
+ return command_executer.GetCommandExecuter().RunCommand(
+ command, print_to_console=False
+ )
def HasGitUnstagedChanges(git_dir):
- """Return True if git repository has un-staged changes."""
- command = f'cd {git_dir} && git diff --quiet --exit-code HEAD'
- return command_executer.GetCommandExecuter().RunCommand(
- command, print_to_console=False)
+ """Return True if git repository has un-staged changes."""
+ command = f"cd {git_dir} && git diff --quiet --exit-code HEAD"
+ return command_executer.GetCommandExecuter().RunCommand(
+ command, print_to_console=False
+ )
def HasGitUntrackedChanges(git_dir):
- """Return True if git repository has un-tracked changes."""
- command = (f'cd {git_dir} && test -z '
- '$(git ls-files --exclude-standard --others)')
- return command_executer.GetCommandExecuter().RunCommand(
- command, print_to_console=False)
+ """Return True if git repository has un-tracked changes."""
+ command = (
+ f"cd {git_dir} && test -z "
+ "$(git ls-files --exclude-standard --others)"
+ )
+ return command_executer.GetCommandExecuter().RunCommand(
+ command, print_to_console=False
+ )
def GitGetCommitHash(git_dir, commit_symbolic_name):
- """Return githash for the symbolic git commit.
+ """Return githash for the symbolic git commit.
- For example, commit_symbolic_name could be
- "cros/gcc.gnu.org/branches/gcc/gcc-4_8-mobile, this function returns the git
- hash for this symbolic name.
+ For example, commit_symbolic_name could be
+ "cros/gcc.gnu.org/branches/gcc/gcc-4_8-mobile, this function returns the git
+ hash for this symbolic name.
- Args:
- git_dir: a git working tree.
- commit_symbolic_name: a symbolic name for a particular git commit.
+ Args:
+ git_dir: a git working tree.
+ commit_symbolic_name: a symbolic name for a particular git commit.
- Returns:
- The git hash for the symbolic name or None if fails.
- """
+ Returns:
+ The git hash for the symbolic name, or None on failure.
+ """
- command = (f'cd {git_dir} && git log -n 1'
- f' --pretty="format:%H" {commit_symbolic_name}')
- rv, out, _ = command_executer.GetCommandExecuter().RunCommandWOutput(
- command, print_to_console=False)
- if rv == 0:
- return out.strip()
- return None
+ command = (
+ f"cd {git_dir} && git log -n 1"
+ f' --pretty="format:%H" {commit_symbolic_name}'
+ )
+ rv, out, _ = command_executer.GetCommandExecuter().RunCommandWOutput(
+ command, print_to_console=False
+ )
+ if rv == 0:
+ return out.strip()
+ return None
def IsGitTreeClean(git_dir):
- """Test if git tree has no local changes.
-
- Args:
- git_dir: git tree directory.
-
- Returns:
- True if git dir is clean.
- """
- if HasGitStagedChanges(git_dir):
- logger.GetLogger().LogWarning('Git tree has staged changes.')
- return False
- if HasGitUnstagedChanges(git_dir):
- logger.GetLogger().LogWarning('Git tree has unstaged changes.')
- return False
- if HasGitUntrackedChanges(git_dir):
- logger.GetLogger().LogWarning('Git tree has un-tracked changes.')
- return False
- return True
+ """Test if git tree has no local changes.
+
+ Args:
+ git_dir: git tree directory.
+
+ Returns:
+ True if git dir is clean.
+ """
+ if HasGitStagedChanges(git_dir):
+ logger.GetLogger().LogWarning("Git tree has staged changes.")
+ return False
+ if HasGitUnstagedChanges(git_dir):
+ logger.GetLogger().LogWarning("Git tree has unstaged changes.")
+ return False
+ if HasGitUntrackedChanges(git_dir):
+ logger.GetLogger().LogWarning("Git tree has un-tracked changes.")
+ return False
+ return True
def GetGitChangesAsList(git_dir, path=None, staged=False):
- """Get changed files as a list.
-
- Args:
- git_dir: git tree directory.
- path: a relative path that is part of the tree directory, could be null.
- staged: whether to include staged files as well.
-
- Returns:
- A list containing all the changed files.
- """
- command = f'cd {git_dir} && git diff --name-only'
- if staged:
- command += ' --cached'
- if path:
- command += ' -- ' + path
- _, out, _ = command_executer.GetCommandExecuter().RunCommandWOutput(
- command, print_to_console=False)
- rv = []
- for line in out.splitlines():
- rv.append(line)
- return rv
+ """Get changed files as a list.
+
+ Args:
+ git_dir: git tree directory.
+ path: a relative path that is part of the tree directory; may be None.
+ staged: whether to include staged files as well.
+
+ Returns:
+ A list containing all the changed files.
+ """
+ command = f"cd {git_dir} && git diff --name-only"
+ if staged:
+ command += " --cached"
+ if path:
+ command += " -- " + path
+ _, out, _ = command_executer.GetCommandExecuter().RunCommandWOutput(
+ command, print_to_console=False
+ )
+ rv = []
+ for line in out.splitlines():
+ rv.append(line)
+ return rv
def IsChromeOsTree(chromeos_root):
- return (os.path.isdir(
- os.path.join(chromeos_root, 'src/third_party/chromiumos-overlay'))
- and os.path.isdir(os.path.join(chromeos_root, 'manifest')))
+ return os.path.isdir(
+ os.path.join(chromeos_root, "src/third_party/chromiumos-overlay")
+ ) and os.path.isdir(os.path.join(chromeos_root, "manifest"))
def DeleteChromeOsTree(chromeos_root, dry_run=False):
- """Delete a ChromeOs tree *safely*.
-
- Args:
- chromeos_root: dir of the tree, could be a relative one (but be careful)
- dry_run: only prints out the command if True
-
- Returns:
- True if everything is ok.
- """
- if not IsChromeOsTree(chromeos_root):
- logger.GetLogger().LogWarning(f'"{chromeos_root}" does not seem to be a'
- ' valid chromeos tree, do nothing.')
- return False
- cmd0 = f'cd {chromeos_root} && cros_sdk --delete'
- if dry_run:
- print(cmd0)
- else:
- if command_executer.GetCommandExecuter().RunCommand(
- cmd0, print_to_console=True) != 0:
- return False
-
- cmd1 = (
- f'export CHROMEOSDIRNAME="$(dirname $(cd {chromeos_root} && pwd))" && '
- f'export CHROMEOSBASENAME="$(basename $(cd {chromeos_root} && pwd))" && '
- 'cd $CHROMEOSDIRNAME && sudo rm -fr $CHROMEOSBASENAME')
- if dry_run:
- print(cmd1)
- return True
-
- return command_executer.GetCommandExecuter().RunCommand(
- cmd1, print_to_console=True) == 0
-
-
-def BooleanPrompt(prompt='Do you want to continue?',
- default=True,
- true_value='yes',
- false_value='no',
- prolog=None):
- """Helper function for processing boolean choice prompts.
-
- Args:
- prompt: The question to present to the user.
- default: Boolean to return if the user just presses enter.
- true_value: The text to display that represents a True returned.
- false_value: The text to display that represents a False returned.
- prolog: The text to display before prompt.
-
- Returns:
- True or False.
- """
- true_value, false_value = true_value.lower(), false_value.lower()
- true_text, false_text = true_value, false_value
- if true_value == false_value:
- raise ValueError('true_value and false_value must differ: got %r' %
- true_value)
-
- if default:
- true_text = true_text[0].upper() + true_text[1:]
- else:
- false_text = false_text[0].upper() + false_text[1:]
-
- prompt = ('\n%s (%s/%s)? ' % (prompt, true_text, false_text))
-
- if prolog:
- prompt = ('\n%s\n%s' % (prolog, prompt))
-
- while True:
- try:
- # pylint: disable=input-builtin, bad-builtin
- response = input(prompt).lower()
- except EOFError:
- # If the user hits CTRL+D, or stdin is disabled, use the default.
- print()
- response = None
- except KeyboardInterrupt:
- # If the user hits CTRL+C, just exit the process.
- print()
- print('CTRL+C detected; exiting')
- sys.exit()
-
- if not response:
- return default
- if true_value.startswith(response):
- if not false_value.startswith(response):
+ """Delete a ChromeOs tree *safely*.
+
+ Args:
+ chromeos_root: dir of the tree, could be a relative one (but be careful)
+ dry_run: only prints out the command if True
+
+ Returns:
+ True if everything is ok.
+ """
+ if not IsChromeOsTree(chromeos_root):
+ logger.GetLogger().LogWarning(
+ f'"{chromeos_root}" does not seem to be a'
+ " valid chromeos tree, do nothing."
+ )
+ return False
+ cmd0 = f"cd {chromeos_root} && cros_sdk --delete"
+ if dry_run:
+ print(cmd0)
+ else:
+ if (
+ command_executer.GetCommandExecuter().RunCommand(
+ cmd0, print_to_console=True
+ )
+ != 0
+ ):
+ return False
+
+ cmd1 = (
+ f'export CHROMEOSDIRNAME="$(dirname $(cd {chromeos_root} && pwd))" && '
+ f'export CHROMEOSBASENAME="$(basename $(cd {chromeos_root} && pwd))" && '
+ "cd $CHROMEOSDIRNAME && sudo rm -fr $CHROMEOSBASENAME"
+ )
+ if dry_run:
+ print(cmd1)
return True
- # common prefix between the two...
- elif false_value.startswith(response):
- return False
+
+ return (
+ command_executer.GetCommandExecuter().RunCommand(
+ cmd1, print_to_console=True
+ )
+ == 0
+ )
+
+
+def BooleanPrompt(
+ prompt="Do you want to continue?",
+ default=True,
+ true_value="yes",
+ false_value="no",
+ prolog=None,
+):
+ """Helper function for processing boolean choice prompts.
+
+ Args:
+ prompt: The question to present to the user.
+ default: Boolean to return if the user just presses enter.
+ true_value: The text to display that represents a True returned.
+ false_value: The text to display that represents a False returned.
+ prolog: The text to display before prompt.
+
+ Returns:
+ True or False.
+ """
+ true_value, false_value = true_value.lower(), false_value.lower()
+ true_text, false_text = true_value, false_value
+ if true_value == false_value:
+ raise ValueError(
+ "true_value and false_value must differ: got %r" % true_value
+ )
+
+ if default:
+ true_text = true_text[0].upper() + true_text[1:]
+ else:
+ false_text = false_text[0].upper() + false_text[1:]
+
+ prompt = "\n%s (%s/%s)? " % (prompt, true_text, false_text)
+
+ if prolog:
+ prompt = "\n%s\n%s" % (prolog, prompt)
+
+ while True:
+ try:
+ # pylint: disable=input-builtin, bad-builtin
+ response = input(prompt).lower()
+ except EOFError:
+ # If the user hits CTRL+D, or stdin is disabled, use the default.
+ print()
+ response = None
+ except KeyboardInterrupt:
+ # If the user hits CTRL+C, just exit the process.
+ print()
+ print("CTRL+C detected; exiting")
+ sys.exit()
+
+ if not response:
+ return default
+ if true_value.startswith(response):
+ if not false_value.startswith(response):
+ return True
+ # common prefix between the two...
+ elif false_value.startswith(response):
+ return False
# pylint: disable=unused-argument
def rgb2short(r, g, b):
- """Converts RGB values to xterm-256 color."""
+ """Converts RGB values to xterm-256 color."""
- redcolor = [255, 124, 160, 196, 9]
- greencolor = [255, 118, 82, 46, 10]
+ redcolor = [255, 124, 160, 196, 9]
+ greencolor = [255, 118, 82, 46, 10]
- if g == 0:
- return redcolor[r // 52]
- if r == 0:
- return greencolor[g // 52]
- return 4
+ if g == 0:
+ return redcolor[r // 52]
+ if r == 0:
+ return greencolor[g // 52]
+ return 4
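For quick reference, a minimal sketch of how a few of the misc.py helpers above behave. The expected values are derived from the function bodies and from the assertions in misc_test.py below; the only assumption is that the module is importable as cros_utils.misc:

    from cros_utils import misc

    # UnitToNumber() matches the unit against "kilo"/"mega"/"giga" by prefix.
    assert misc.UnitToNumber("2k") == 2000.0  # default base is 1000
    assert misc.UnitToNumber("16m", base=1024) == 16 * 1024 ** 2
    # GetFilenameFromString() flattens '/' and whitespace and drops shell-unsafe chars.
    assert misc.GetFilenameFromString('a /b=c"d^$?\\') == "a___bcd"
    # MergeEnvStringWithDict() prepends by default, e.g.
    #   USE='hello 123'  ->  USE='hello 123' USE='bla bla hello 123'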
diff --git a/cros_utils/misc_test.py b/cros_utils/misc_test.py
index 21a545e9..9e2d1107 100755
--- a/cros_utils/misc_test.py
+++ b/cros_utils/misc_test.py
@@ -1,14 +1,13 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
-# Copyright 2019 The Chromium OS Authors. All rights reserved.
+# Copyright 2019 The ChromiumOS Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Tests for misc."""
-from __future__ import print_function
-__author__ = 'asharif@google.com (Ahmad Sharif)'
+__author__ = "asharif@google.com (Ahmad Sharif)"
# System modules
import unittest
@@ -18,39 +17,51 @@ from cros_utils import misc
class UtilsTest(unittest.TestCase):
- """Tests for misc."""
-
- def testGetFilenameFromString(self):
- string = 'a /b=c"d^$?\\'
- filename = misc.GetFilenameFromString(string)
- self.assertEqual(filename, 'a___bcd')
-
- def testPrependMergeEnv(self):
- var = 'USE'
- use_flags = 'hello 123'
- added_use_flags = 'bla bla'
- env_string = '%s=%r' % (var, use_flags)
- new_env_string = misc.MergeEnvStringWithDict(env_string,
- {var: added_use_flags})
- expected_new_env = '%s=%r' % (var, ' '.join([added_use_flags, use_flags]))
- self.assertEqual(new_env_string, ' '.join([env_string, expected_new_env]))
-
- def testGetChromeOSVersionFromLSBVersion(self):
- versions_dict = {'2630.0.0': '22', '2030.0.0': '19'}
- f = misc.GetChromeOSVersionFromLSBVersion
- for k, v in versions_dict.items():
- self.assertEqual(f(k), 'R%s-%s' % (v, k))
-
- def testPostpendMergeEnv(self):
- var = 'USE'
- use_flags = 'hello 123'
- added_use_flags = 'bla bla'
- env_string = '%s=%r' % (var, use_flags)
- new_env_string = misc.MergeEnvStringWithDict(env_string,
- {var: added_use_flags}, False)
- expected_new_env = '%s=%r' % (var, ' '.join([use_flags, added_use_flags]))
- self.assertEqual(new_env_string, ' '.join([env_string, expected_new_env]))
-
-
-if __name__ == '__main__':
- unittest.main()
+ """Tests for misc."""
+
+ def testGetFilenameFromString(self):
+ string = 'a /b=c"d^$?\\'
+ filename = misc.GetFilenameFromString(string)
+ self.assertEqual(filename, "a___bcd")
+
+ def testPrependMergeEnv(self):
+ var = "USE"
+ use_flags = "hello 123"
+ added_use_flags = "bla bla"
+ env_string = "%s=%r" % (var, use_flags)
+ new_env_string = misc.MergeEnvStringWithDict(
+ env_string, {var: added_use_flags}
+ )
+ expected_new_env = "%s=%r" % (
+ var,
+ " ".join([added_use_flags, use_flags]),
+ )
+ self.assertEqual(
+ new_env_string, " ".join([env_string, expected_new_env])
+ )
+
+ def testGetChromeOSVersionFromLSBVersion(self):
+ versions_dict = {"2630.0.0": "22", "2030.0.0": "19"}
+ f = misc.GetChromeOSVersionFromLSBVersion
+ for k, v in versions_dict.items():
+ self.assertEqual(f(k), "R%s-%s" % (v, k))
+
+ def testPostpendMergeEnv(self):
+ var = "USE"
+ use_flags = "hello 123"
+ added_use_flags = "bla bla"
+ env_string = "%s=%r" % (var, use_flags)
+ new_env_string = misc.MergeEnvStringWithDict(
+ env_string, {var: added_use_flags}, False
+ )
+ expected_new_env = "%s=%r" % (
+ var,
+ " ".join([use_flags, added_use_flags]),
+ )
+ self.assertEqual(
+ new_env_string, " ".join([env_string, expected_new_env])
+ )
+
+
+if __name__ == "__main__":
+ unittest.main()
diff --git a/cros_utils/no_pseudo_terminal_test.py b/cros_utils/no_pseudo_terminal_test.py
index 10fd9608..acc90af4 100755
--- a/cros_utils/no_pseudo_terminal_test.py
+++ b/cros_utils/no_pseudo_terminal_test.py
@@ -1,64 +1,64 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
-# Copyright 2019 The Chromium OS Authors. All rights reserved.
+# Copyright 2019 The ChromiumOS Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Test to ensure we're not touching /dev/ptmx when running commands."""
-from __future__ import print_function
import os
import subprocess
import tempfile
import time
import unittest
+
from cros_utils import command_executer
class NoPsuedoTerminalTest(unittest.TestCase):
- """Test to ensure we're not touching /dev/ptmx when running commands."""
+ """Test to ensure we're not touching /dev/ptmx when running commands."""
- _strace_process = None
- STRACE_TIMEOUT = 10
+ _strace_process = None
+ STRACE_TIMEOUT = 10
- def _AttachStraceToSelf(self, output_file):
- """Attaches strace to the current process."""
- args = ['sudo', 'strace', '-o', output_file, '-p', str(os.getpid())]
- print(args)
- # pylint: disable=bad-option-value, subprocess-popen-preexec-fn
- self._strace_process = subprocess.Popen(args, preexec_fn=os.setpgrp)
- # Wait until we see some activity.
- start_time = time.time()
- while time.time() - start_time < self.STRACE_TIMEOUT:
- if os.path.isfile(output_file) and open(output_file).read(1):
- return True
- time.sleep(1)
- return False
+ def _AttachStraceToSelf(self, output_file):
+ """Attaches strace to the current process."""
+ args = ["sudo", "strace", "-o", output_file, "-p", str(os.getpid())]
+ print(args)
+ # pylint: disable=bad-option-value, subprocess-popen-preexec-fn
+ self._strace_process = subprocess.Popen(args, preexec_fn=os.setpgrp)
+ # Wait until we see some activity.
+ start_time = time.time()
+ while time.time() - start_time < self.STRACE_TIMEOUT:
+ if os.path.isfile(output_file) and open(output_file).read(1):
+ return True
+ time.sleep(1)
+ return False
- def _KillStraceProcess(self):
- """Kills strace that was started by _AttachStraceToSelf()."""
- pgid = os.getpgid(self._strace_process.pid)
- args = ['sudo', 'kill', str(pgid)]
- if subprocess.call(args) == 0:
- os.waitpid(pgid, 0)
- return True
- return False
+ def _KillStraceProcess(self):
+ """Kills strace that was started by _AttachStraceToSelf()."""
+ pgid = os.getpgid(self._strace_process.pid)
+ args = ["sudo", "kill", str(pgid)]
+ if subprocess.call(args) == 0:
+ os.waitpid(pgid, 0)
+ return True
+ return False
- def testNoPseudoTerminalWhenRunningCommand(self):
- """Test to make sure we're not touching /dev/ptmx when running commands."""
- temp_file = tempfile.mktemp()
- self.assertTrue(self._AttachStraceToSelf(temp_file))
+ def testNoPseudoTerminalWhenRunningCommand(self):
+ """Test to make sure we're not touching /dev/ptmx when running commands."""
+ temp_file = tempfile.mktemp()
+ self.assertTrue(self._AttachStraceToSelf(temp_file))
- ce = command_executer.GetCommandExecuter()
- ce.RunCommand('echo')
+ ce = command_executer.GetCommandExecuter()
+ ce.RunCommand("echo")
- self.assertTrue(self._KillStraceProcess())
+ self.assertTrue(self._KillStraceProcess())
- strace_contents = open(temp_file).read()
- self.assertFalse('/dev/ptmx' in strace_contents)
+ strace_contents = open(temp_file).read()
+ self.assertFalse("/dev/ptmx" in strace_contents)
-if __name__ == '__main__':
- unittest.main()
+if __name__ == "__main__":
+ unittest.main()
diff --git a/cros_utils/perf_diff.py b/cros_utils/perf_diff.py
index b8ddb0c4..6647b76a 100755
--- a/cros_utils/perf_diff.py
+++ b/cros_utils/perf_diff.py
@@ -1,6 +1,6 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
-# Copyright 2019 The Chromium OS Authors. All rights reserved.
+# Copyright 2019 The ChromiumOS Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
@@ -9,9 +9,8 @@
A detailed description of perf_diff.
"""
-from __future__ import print_function
-__author__ = 'asharif@google.com (Ahmad Sharif)'
+__author__ = "asharif@google.com (Ahmad Sharif)"
import argparse
import functools
@@ -21,319 +20,338 @@ import sys
from cros_utils import misc
from cros_utils import tabulator
-ROWS_TO_SHOW = 'Rows_to_show_in_the_perf_table'
-TOTAL_EVENTS = 'Total_events_of_this_profile'
+
+ROWS_TO_SHOW = "Rows_to_show_in_the_perf_table"
+TOTAL_EVENTS = "Total_events_of_this_profile"
def GetPerfDictFromReport(report_file):
- output = {}
- perf_report = PerfReport(report_file)
- for k, v in perf_report.sections.items():
- if k not in output:
- output[k] = {}
- output[k][ROWS_TO_SHOW] = 0
- output[k][TOTAL_EVENTS] = 0
- for function in v.functions:
- out_key = '%s' % (function.name)
- output[k][out_key] = function.count
- output[k][TOTAL_EVENTS] += function.count
- if function.percent > 1:
- output[k][ROWS_TO_SHOW] += 1
- return output
+ output = {}
+ perf_report = PerfReport(report_file)
+ for k, v in perf_report.sections.items():
+ if k not in output:
+ output[k] = {}
+ output[k][ROWS_TO_SHOW] = 0
+ output[k][TOTAL_EVENTS] = 0
+ for function in v.functions:
+ out_key = "%s" % (function.name)
+ output[k][out_key] = function.count
+ output[k][TOTAL_EVENTS] += function.count
+ if function.percent > 1:
+ output[k][ROWS_TO_SHOW] += 1
+ return output
def _SortDictionaryByValue(d):
- l = d.items()
+ l = d.items()
- def GetFloat(x):
- if misc.IsFloat(x):
- return float(x)
- else:
- return x
+ def GetFloat(x):
+ if misc.IsFloat(x):
+ return float(x)
+ else:
+ return x
- sorted_l = sorted(l, key=lambda x: GetFloat(x[1]))
- sorted_l.reverse()
- return [f[0] for f in sorted_l]
+ sorted_l = sorted(l, key=lambda x: GetFloat(x[1]))
+ sorted_l.reverse()
+ return [f[0] for f in sorted_l]
class Tabulator(object):
- """Make tables."""
-
- def __init__(self, all_dicts):
- self._all_dicts = all_dicts
-
- def PrintTable(self):
- for dicts in self._all_dicts:
- self.PrintTableHelper(dicts)
-
- def PrintTableHelper(self, dicts):
- """Transfrom dicts to tables."""
- fields = {}
- for d in dicts:
- for f in d.keys():
- if f not in fields:
- fields[f] = d[f]
- else:
- fields[f] = max(fields[f], d[f])
- table = []
- header = ['name']
- for i in range(len(dicts)):
- header.append(i)
+ """Make tables."""
- table.append(header)
+ def __init__(self, all_dicts):
+ self._all_dicts = all_dicts
- sorted_fields = _SortDictionaryByValue(fields)
+ def PrintTable(self):
+ for dicts in self._all_dicts:
+ self.PrintTableHelper(dicts)
- for f in sorted_fields:
- row = [f]
- for d in dicts:
- if f in d:
- row.append(d[f])
- else:
- row.append('0')
- table.append(row)
+ def PrintTableHelper(self, dicts):
+ """Transfrom dicts to tables."""
+ fields = {}
+ for d in dicts:
+ for f in d.keys():
+ if f not in fields:
+ fields[f] = d[f]
+ else:
+ fields[f] = max(fields[f], d[f])
+ table = []
+ header = ["name"]
+ for i in range(len(dicts)):
+ header.append(i)
+
+ table.append(header)
- print(tabulator.GetSimpleTable(table))
+ sorted_fields = _SortDictionaryByValue(fields)
+
+ for f in sorted_fields:
+ row = [f]
+ for d in dicts:
+ if f in d:
+ row.append(d[f])
+ else:
+ row.append("0")
+ table.append(row)
+
+ print(tabulator.GetSimpleTable(table))
class Function(object):
- """Function for formatting."""
+ """Function for formatting."""
- def __init__(self):
- self.count = 0
- self.name = ''
- self.percent = 0
+ def __init__(self):
+ self.count = 0
+ self.name = ""
+ self.percent = 0
class Section(object):
- """Section formatting."""
-
- def __init__(self, contents):
- self.name = ''
- self.raw_contents = contents
- self._ParseSection()
-
- def _ParseSection(self):
- matches = re.findall(r'Events: (\w+)\s+(.*)', self.raw_contents)
- assert len(matches) <= 1, 'More than one event found in 1 section'
- if not matches:
- return
- match = matches[0]
- self.name = match[1]
- self.count = misc.UnitToNumber(match[0])
-
- self.functions = []
- for line in self.raw_contents.splitlines():
- if not line.strip():
- continue
- if '%' not in line:
- continue
- if not line.startswith('#'):
- fields = [f for f in line.split(' ') if f]
- function = Function()
- function.percent = float(fields[0].strip('%'))
- function.count = int(fields[1])
- function.name = ' '.join(fields[2:])
- self.functions.append(function)
+ """Section formatting."""
+
+ def __init__(self, contents):
+ self.name = ""
+ self.raw_contents = contents
+ self._ParseSection()
+
+ def _ParseSection(self):
+ matches = re.findall(r"Events: (\w+)\s+(.*)", self.raw_contents)
+ assert len(matches) <= 1, "More than one event found in 1 section"
+ if not matches:
+ return
+ match = matches[0]
+ self.name = match[1]
+ self.count = misc.UnitToNumber(match[0])
+
+ self.functions = []
+ for line in self.raw_contents.splitlines():
+ if not line.strip():
+ continue
+ if "%" not in line:
+ continue
+ if not line.startswith("#"):
+ fields = [f for f in line.split(" ") if f]
+ function = Function()
+ function.percent = float(fields[0].strip("%"))
+ function.count = int(fields[1])
+ function.name = " ".join(fields[2:])
+ self.functions.append(function)
class PerfReport(object):
- """Get report from raw report."""
-
- def __init__(self, perf_file):
- self.perf_file = perf_file
- self._ReadFile()
- self.sections = {}
- self.metadata = {}
- self._section_contents = []
- self._section_header = ''
- self._SplitSections()
- self._ParseSections()
- self._ParseSectionHeader()
-
- def _ParseSectionHeader(self):
- """Parse a header of a perf report file."""
- # The "captured on" field is inaccurate - this actually refers to when the
- # report was generated, not when the data was captured.
- for line in self._section_header.splitlines():
- line = line[2:]
- if ':' in line:
- key, val = line.strip().split(':', 1)
- key = key.strip()
- val = val.strip()
- self.metadata[key] = val
-
- def _ReadFile(self):
- self._perf_contents = open(self.perf_file).read()
-
- def _ParseSections(self):
- self.event_counts = {}
- self.sections = {}
- for section_content in self._section_contents:
- section = Section(section_content)
- section.name = self._GetHumanReadableName(section.name)
- self.sections[section.name] = section
-
- # TODO(asharif): Do this better.
- def _GetHumanReadableName(self, section_name):
- if not 'raw' in section_name:
- return section_name
- raw_number = section_name.strip().split(' ')[-1]
- for line in self._section_header.splitlines():
- if raw_number in line:
- name = line.strip().split(' ')[5]
- return name
-
- def _SplitSections(self):
- self._section_contents = []
- indices = [m.start() for m in re.finditer('# Events:', self._perf_contents)]
- indices.append(len(self._perf_contents))
- for i in range(len(indices) - 1):
- section_content = self._perf_contents[indices[i]:indices[i + 1]]
- self._section_contents.append(section_content)
- self._section_header = ''
- if indices:
- self._section_header = self._perf_contents[0:indices[0]]
+ """Get report from raw report."""
+
+ def __init__(self, perf_file):
+ self.perf_file = perf_file
+ self._ReadFile()
+ self.sections = {}
+ self.metadata = {}
+ self._section_contents = []
+ self._section_header = ""
+ self._SplitSections()
+ self._ParseSections()
+ self._ParseSectionHeader()
+
+ def _ParseSectionHeader(self):
+ """Parse a header of a perf report file."""
+ # The "captured on" field is inaccurate - this actually refers to when the
+ # report was generated, not when the data was captured.
+ for line in self._section_header.splitlines():
+ line = line[2:]
+ if ":" in line:
+ key, val = line.strip().split(":", 1)
+ key = key.strip()
+ val = val.strip()
+ self.metadata[key] = val
+
+ def _ReadFile(self):
+ self._perf_contents = open(self.perf_file).read()
+
+ def _ParseSections(self):
+ self.event_counts = {}
+ self.sections = {}
+ for section_content in self._section_contents:
+ section = Section(section_content)
+ section.name = self._GetHumanReadableName(section.name)
+ self.sections[section.name] = section
+
+ # TODO(asharif): Do this better.
+ def _GetHumanReadableName(self, section_name):
+ if not "raw" in section_name:
+ return section_name
+ raw_number = section_name.strip().split(" ")[-1]
+ for line in self._section_header.splitlines():
+ if raw_number in line:
+ name = line.strip().split(" ")[5]
+ return name
+
+ def _SplitSections(self):
+ self._section_contents = []
+ indices = [
+ m.start() for m in re.finditer("# Events:", self._perf_contents)
+ ]
+ indices.append(len(self._perf_contents))
+ for i in range(len(indices) - 1):
+ section_content = self._perf_contents[indices[i] : indices[i + 1]]
+ self._section_contents.append(section_content)
+ self._section_header = ""
+ if indices:
+ self._section_header = self._perf_contents[0 : indices[0]]
class PerfDiffer(object):
- """Perf differ class."""
-
- def __init__(self, reports, num_symbols, common_only):
- self._reports = reports
- self._num_symbols = num_symbols
- self._common_only = common_only
- self._common_function_names = {}
-
- def DoDiff(self):
- """The function that does the diff."""
- section_names = self._FindAllSections()
-
- filename_dicts = []
- summary_dicts = []
- for report in self._reports:
- d = {}
- filename_dicts.append({'file': report.perf_file})
- for section_name in section_names:
- if section_name in report.sections:
- d[section_name] = report.sections[section_name].count
- summary_dicts.append(d)
-
- all_dicts = [filename_dicts, summary_dicts]
-
- for section_name in section_names:
- function_names = self._GetTopFunctions(section_name, self._num_symbols)
- self._FindCommonFunctions(section_name)
- dicts = []
- for report in self._reports:
+ """Perf differ class."""
+
+ def __init__(self, reports, num_symbols, common_only):
+ self._reports = reports
+ self._num_symbols = num_symbols
+ self._common_only = common_only
+ self._common_function_names = {}
+
+ def DoDiff(self):
+ """The function that does the diff."""
+ section_names = self._FindAllSections()
+
+ filename_dicts = []
+ summary_dicts = []
+ for report in self._reports:
+ d = {}
+ filename_dicts.append({"file": report.perf_file})
+ for section_name in section_names:
+ if section_name in report.sections:
+ d[section_name] = report.sections[section_name].count
+ summary_dicts.append(d)
+
+ all_dicts = [filename_dicts, summary_dicts]
+
+ for section_name in section_names:
+ function_names = self._GetTopFunctions(
+ section_name, self._num_symbols
+ )
+ self._FindCommonFunctions(section_name)
+ dicts = []
+ for report in self._reports:
+ d = {}
+ if section_name in report.sections:
+ section = report.sections[section_name]
+
+ # Get a common scaling factor for this report.
+ common_scaling_factor = self._GetCommonScalingFactor(
+ section
+ )
+
+ for function in section.functions:
+ if function.name in function_names:
+ key = "%s %s" % (section.name, function.name)
+ d[key] = function.count
+ # Compute a factor to scale the function count by in common_only
+ # mode.
+ if self._common_only and (
+ function.name
+ in self._common_function_names[section.name]
+ ):
+ d[key + " scaled"] = (
+ common_scaling_factor * function.count
+ )
+ dicts.append(d)
+
+ all_dicts.append(dicts)
+
+ mytabulator = Tabulator(all_dicts)
+ mytabulator.PrintTable()
+
+ def _FindAllSections(self):
+ sections = {}
+ for report in self._reports:
+ for section in report.sections.values():
+ if section.name not in sections:
+ sections[section.name] = section.count
+ else:
+ sections[section.name] = max(
+ sections[section.name], section.count
+ )
+ return _SortDictionaryByValue(sections)
+
+ def _GetCommonScalingFactor(self, section):
+ unique_count = self._GetCount(
+ section, lambda x: x in self._common_function_names[section.name]
+ )
+ return 100.0 / unique_count
+
+ def _GetCount(self, section, filter_fun=None):
+ total_count = 0
+ for function in section.functions:
+ if not filter_fun or filter_fun(function.name):
+ total_count += int(function.count)
+ return total_count
+
+ def _FindCommonFunctions(self, section_name):
+ function_names_list = []
+ for report in self._reports:
+ if section_name in report.sections:
+ section = report.sections[section_name]
+ function_names = {f.name for f in section.functions}
+ function_names_list.append(function_names)
+
+ self._common_function_names[section_name] = functools.reduce(
+ set.intersection, function_names_list
+ )
+
+ def _GetTopFunctions(self, section_name, num_functions):
+ all_functions = {}
+ for report in self._reports:
+ if section_name in report.sections:
+ section = report.sections[section_name]
+ for f in section.functions[:num_functions]:
+ if f.name in all_functions:
+ all_functions[f.name] = max(
+ all_functions[f.name], f.count
+ )
+ else:
+ all_functions[f.name] = f.count
+ # FIXME(asharif): Don't really need to sort these...
+ return _SortDictionaryByValue(all_functions)
+
+ def _GetFunctionsDict(self, section, function_names):
d = {}
- if section_name in report.sections:
- section = report.sections[section_name]
-
- # Get a common scaling factor for this report.
- common_scaling_factor = self._GetCommonScalingFactor(section)
-
- for function in section.functions:
+ for function in section.functions:
if function.name in function_names:
- key = '%s %s' % (section.name, function.name)
- d[key] = function.count
- # Compute a factor to scale the function count by in common_only
- # mode.
- if self._common_only and (
- function.name in self._common_function_names[section.name]):
- d[key + ' scaled'] = common_scaling_factor * function.count
- dicts.append(d)
-
- all_dicts.append(dicts)
-
- mytabulator = Tabulator(all_dicts)
- mytabulator.PrintTable()
-
- def _FindAllSections(self):
- sections = {}
- for report in self._reports:
- for section in report.sections.values():
- if section.name not in sections:
- sections[section.name] = section.count
- else:
- sections[section.name] = max(sections[section.name], section.count)
- return _SortDictionaryByValue(sections)
-
- def _GetCommonScalingFactor(self, section):
- unique_count = self._GetCount(
- section, lambda x: x in self._common_function_names[section.name])
- return 100.0 / unique_count
-
- def _GetCount(self, section, filter_fun=None):
- total_count = 0
- for function in section.functions:
- if not filter_fun or filter_fun(function.name):
- total_count += int(function.count)
- return total_count
-
- def _FindCommonFunctions(self, section_name):
- function_names_list = []
- for report in self._reports:
- if section_name in report.sections:
- section = report.sections[section_name]
- function_names = {f.name for f in section.functions}
- function_names_list.append(function_names)
-
- self._common_function_names[section_name] = (
- functools.reduce(set.intersection, function_names_list))
-
- def _GetTopFunctions(self, section_name, num_functions):
- all_functions = {}
- for report in self._reports:
- if section_name in report.sections:
- section = report.sections[section_name]
- for f in section.functions[:num_functions]:
- if f.name in all_functions:
- all_functions[f.name] = max(all_functions[f.name], f.count)
- else:
- all_functions[f.name] = f.count
- # FIXME(asharif): Don't really need to sort these...
- return _SortDictionaryByValue(all_functions)
-
- def _GetFunctionsDict(self, section, function_names):
- d = {}
- for function in section.functions:
- if function.name in function_names:
- d[function.name] = function.count
- return d
+ d[function.name] = function.count
+ return d
def Main(argv):
- """The entry of the main."""
- parser = argparse.ArgumentParser()
- parser.add_argument(
- '-n',
- '--num_symbols',
- dest='num_symbols',
- default='5',
- help='The number of symbols to show.')
- parser.add_argument(
- '-c',
- '--common_only',
- dest='common_only',
- action='store_true',
- default=False,
- help='Diff common symbols only.')
-
- options, args = parser.parse_known_args(argv)
-
- try:
- reports = []
- for report in args[1:]:
- report = PerfReport(report)
- reports.append(report)
- pd = PerfDiffer(reports, int(options.num_symbols), options.common_only)
- pd.DoDiff()
- finally:
- pass
-
- return 0
-
-
-if __name__ == '__main__':
- sys.exit(Main(sys.argv))
+ """The entry of the main."""
+ parser = argparse.ArgumentParser()
+ parser.add_argument(
+ "-n",
+ "--num_symbols",
+ dest="num_symbols",
+ default="5",
+ help="The number of symbols to show.",
+ )
+ parser.add_argument(
+ "-c",
+ "--common_only",
+ dest="common_only",
+ action="store_true",
+ default=False,
+ help="Diff common symbols only.",
+ )
+
+ options, args = parser.parse_known_args(argv)
+
+ try:
+ reports = []
+ for report in args[1:]:
+ report = PerfReport(report)
+ reports.append(report)
+ pd = PerfDiffer(reports, int(options.num_symbols), options.common_only)
+ pd.DoDiff()
+ finally:
+ pass
+
+ return 0
+
+
+if __name__ == "__main__":
+ sys.exit(Main(sys.argv))
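For context, a minimal sketch of the text layout perf_diff.py expects from a `perf report` dump. The sample report below is illustrative only (it is not taken from a real profile); the assumption is that the module is importable as cros_utils.perf_diff:

    import tempfile

    from cros_utils import perf_diff

    # Each section starts at a "# Events: <count> <name>" line; every non-comment
    # line containing '%' is parsed as "<percent>  <count>  <symbol fields...>".
    SAMPLE = (
        "# captured on: (illustrative header)\n"
        "# Events: 2K cycles\n"
        "    52.50%      1050  chrome  [.] memcpy\n"
        "    47.50%       950  chrome  [.] malloc\n"
    )

    with tempfile.NamedTemporaryFile("w", suffix=".report", delete=False) as f:
        f.write(SAMPLE)
    report = perf_diff.GetPerfDictFromReport(f.name)
    # report["cycles"] maps each parsed symbol line to its event count:
    #   {"chrome [.] memcpy": 1050, "chrome [.] malloc": 950,
    #    "Rows_to_show_in_the_perf_table": 2,
    #    "Total_events_of_this_profile": 2000}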
diff --git a/cros_utils/tabulator.py b/cros_utils/tabulator.py
index 1a3fd4a7..d079ea22 100644
--- a/cros_utils/tabulator.py
+++ b/cros_utils/tabulator.py
@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
-# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
+# Copyright 2013 The ChromiumOS Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
@@ -61,966 +61,1024 @@ table:
print tp.Print()
"""
-from __future__ import division
-from __future__ import print_function
import collections
import getpass
import math
import statistics
import sys
+
+from cros_utils import misc
+from cros_utils.email_sender import EmailSender
+
# TODO(crbug.com/980719): Drop scipy in the future.
# pylint: disable=import-error
import scipy
-from cros_utils.email_sender import EmailSender
-from cros_utils import misc
-
def _AllFloat(values):
- return all([misc.IsFloat(v) for v in values])
+ return all([misc.IsFloat(v) for v in values])
def _GetFloats(values):
- return [float(v) for v in values]
+ return [float(v) for v in values]
def _StripNone(results):
- res = []
- for result in results:
- if result is not None:
- res.append(result)
- return res
+ res = []
+ for result in results:
+ if result is not None:
+ res.append(result)
+ return res
def _RemoveMinMax(cell, values):
- if len(values) < 3:
- print('WARNING: Values count is less than 3, not ignoring min/max values')
- print('WARNING: Cell name:', cell.name, 'Values:', values)
- return values
+ if len(values) < 3:
+ print(
+ "WARNING: Values count is less than 3, not ignoring min/max values"
+ )
+ print("WARNING: Cell name:", cell.name, "Values:", values)
+ return values
- values.remove(min(values))
- values.remove(max(values))
- return values
+ values.remove(min(values))
+ values.remove(max(values))
+ return values
class TableGenerator(object):
- """Creates a table from a list of list of dicts.
-
- The main public function is called GetTable().
- """
- SORT_BY_KEYS = 0
- SORT_BY_KEYS_DESC = 1
- SORT_BY_VALUES = 2
- SORT_BY_VALUES_DESC = 3
- NO_SORT = 4
-
- MISSING_VALUE = 'x'
-
- def __init__(self, d, l, sort=NO_SORT, key_name='keys'):
- self._runs = d
- self._labels = l
- self._sort = sort
- self._key_name = key_name
-
- def _AggregateKeys(self):
- keys = collections.OrderedDict()
- for run_list in self._runs:
- for run in run_list:
- keys.update(dict.fromkeys(run.keys()))
- return list(keys.keys())
-
- def _GetHighestValue(self, key):
- values = []
- for run_list in self._runs:
- for run in run_list:
- if key in run:
- values.append(run[key])
- values = _StripNone(values)
- if _AllFloat(values):
- values = _GetFloats(values)
- return max(values)
-
- def _GetLowestValue(self, key):
- values = []
- for run_list in self._runs:
- for run in run_list:
- if key in run:
- values.append(run[key])
- values = _StripNone(values)
- if _AllFloat(values):
- values = _GetFloats(values)
- return min(values)
-
- def _SortKeys(self, keys):
- if self._sort == self.SORT_BY_KEYS:
- return sorted(keys)
- elif self._sort == self.SORT_BY_VALUES:
- # pylint: disable=unnecessary-lambda
- return sorted(keys, key=lambda x: self._GetLowestValue(x))
- elif self._sort == self.SORT_BY_VALUES_DESC:
- # pylint: disable=unnecessary-lambda
- return sorted(keys, key=lambda x: self._GetHighestValue(x), reverse=True)
- elif self._sort == self.NO_SORT:
- return keys
- else:
- assert 0, 'Unimplemented sort %s' % self._sort
-
- def _GetKeys(self):
- keys = self._AggregateKeys()
- return self._SortKeys(keys)
-
- def GetTable(self, number_of_rows=sys.maxsize):
- """Returns a table from a list of list of dicts.
+ """Creates a table from a list of list of dicts.
- Examples:
- We have the following runs:
- [[{"k1": "v1", "k2": "v2"}, {"k1": "v3"}],
- [{"k1": "v4", "k4": "v5"}]]
- and the following labels:
- ["vanilla", "modified"]
- it will return:
- [["Key", "vanilla", "modified"]
- ["k1", ["v1", "v3"], ["v4"]]
- ["k2", ["v2"], []]
- ["k4", [], ["v5"]]]
- The returned table can then be processed further by other classes in this
- module.
-
- The list of list of dicts is passed into the constructor of TableGenerator.
- This method converts that into a canonical list of lists which represents a
- table of values.
-
- Args:
- number_of_rows: Maximum number of rows to return from the table.
-
- Returns:
- A list of lists which is the table.
+ The main public function is called GetTable().
"""
- keys = self._GetKeys()
- header = [self._key_name] + self._labels
- table = [header]
- rows = 0
- for k in keys:
- row = [k]
- unit = None
- for run_list in self._runs:
- v = []
- for run in run_list:
- if k in run:
- if isinstance(run[k], list):
- val = run[k][0]
- unit = run[k][1]
- else:
- val = run[k]
- v.append(val)
- else:
- v.append(None)
- row.append(v)
- # If we got a 'unit' value, append the units name to the key name.
- if unit:
- keyname = row[0] + ' (%s) ' % unit
- row[0] = keyname
- table.append(row)
- rows += 1
- if rows == number_of_rows:
- break
- return table
-
-
-class SamplesTableGenerator(TableGenerator):
- """Creates a table with only samples from the results
-
- The main public function is called GetTable().
-
- Different than TableGenerator, self._runs is now a dict of {benchmark: runs}
- We are expecting there is 'samples' in `runs`.
- """
-
- def __init__(self, run_keyvals, label_list, iter_counts, weights):
- TableGenerator.__init__(
- self, run_keyvals, label_list, key_name='Benchmarks')
- self._iter_counts = iter_counts
- self._weights = weights
-
- def _GetKeys(self):
- keys = self._runs.keys()
- return self._SortKeys(keys)
- def GetTable(self, number_of_rows=sys.maxsize):
- """Returns a tuple, which contains three args:
-
- 1) a table from a list of list of dicts.
- 2) updated benchmark_results run_keyvals with composite benchmark
- 3) updated benchmark_results iter_count with composite benchmark
+ SORT_BY_KEYS = 0
+ SORT_BY_KEYS_DESC = 1
+ SORT_BY_VALUES = 2
+ SORT_BY_VALUES_DESC = 3
+ NO_SORT = 4
+
+ MISSING_VALUE = "x"
+
+ def __init__(self, d, l, sort=NO_SORT, key_name="keys"):
+ self._runs = d
+ self._labels = l
+ self._sort = sort
+ self._key_name = key_name
+
+ def _AggregateKeys(self):
+ keys = collections.OrderedDict()
+ for run_list in self._runs:
+ for run in run_list:
+ keys.update(dict.fromkeys(run.keys()))
+ return list(keys.keys())
+
+ def _GetHighestValue(self, key):
+ values = []
+ for run_list in self._runs:
+ for run in run_list:
+ if key in run:
+ values.append(run[key])
+ values = _StripNone(values)
+ if _AllFloat(values):
+ values = _GetFloats(values)
+ return max(values)
+
+ def _GetLowestValue(self, key):
+ values = []
+ for run_list in self._runs:
+ for run in run_list:
+ if key in run:
+ values.append(run[key])
+ values = _StripNone(values)
+ if _AllFloat(values):
+ values = _GetFloats(values)
+ return min(values)
+
+ def _SortKeys(self, keys):
+ if self._sort == self.SORT_BY_KEYS:
+ return sorted(keys)
+ elif self._sort == self.SORT_BY_VALUES:
+ # pylint: disable=unnecessary-lambda
+ return sorted(keys, key=lambda x: self._GetLowestValue(x))
+ elif self._sort == self.SORT_BY_VALUES_DESC:
+ # pylint: disable=unnecessary-lambda
+ return sorted(
+ keys, key=lambda x: self._GetHighestValue(x), reverse=True
+ )
+ elif self._sort == self.NO_SORT:
+ return keys
+ else:
+ assert 0, "Unimplemented sort %s" % self._sort
+
+ def _GetKeys(self):
+ keys = self._AggregateKeys()
+ return self._SortKeys(keys)
+
+ def GetTable(self, number_of_rows=sys.maxsize):
+ """Returns a table from a list of list of dicts.
+
+ Examples:
+ We have the following runs:
+ [[{"k1": "v1", "k2": "v2"}, {"k1": "v3"}],
+ [{"k1": "v4", "k4": "v5"}]]
+ and the following labels:
+ ["vanilla", "modified"]
+ it will return:
+ [["Key", "vanilla", "modified"]
+ ["k1", ["v1", "v3"], ["v4"]]
+ ["k2", ["v2"], []]
+ ["k4", [], ["v5"]]]
+ The returned table can then be processed further by other classes in this
+ module.
+
+ The list of list of dicts is passed into the constructor of TableGenerator.
+ This method converts that into a canonical list of lists which represents a
+ table of values.
+
+ Args:
+ number_of_rows: Maximum number of rows to return from the table.
+
+ Returns:
+ A list of lists which is the table.
+ """
+ keys = self._GetKeys()
+ header = [self._key_name] + self._labels
+ table = [header]
+ rows = 0
+ for k in keys:
+ row = [k]
+ unit = None
+ for run_list in self._runs:
+ v = []
+ for run in run_list:
+ if k in run:
+ if isinstance(run[k], list):
+ val = run[k][0]
+ unit = run[k][1]
+ else:
+ val = run[k]
+ v.append(val)
+ else:
+ v.append(None)
+ row.append(v)
+ # If we got a 'unit' value, append the units name to the key name.
+ if unit:
+ keyname = row[0] + " (%s) " % unit
+ row[0] = keyname
+ table.append(row)
+ rows += 1
+ if rows == number_of_rows:
+ break
+ return table
- The dict of list of list of dicts is passed into the constructor of
- SamplesTableGenerator.
- This method converts that into a canonical list of lists which
- represents a table of values.
- Examples:
- We have the following runs:
- {bench1: [[{"samples": "v1"}, {"samples": "v2"}],
- [{"samples": "v3"}, {"samples": "v4"}]]
- bench2: [[{"samples": "v21"}, None],
- [{"samples": "v22"}, {"samples": "v23"}]]}
- and weights of benchmarks:
- {bench1: w1, bench2: w2}
- and the following labels:
- ["vanilla", "modified"]
- it will return:
- [["Benchmark", "Weights", "vanilla", "modified"]
- ["bench1", w1,
- ((2, 0), ["v1*w1", "v2*w1"]), ((2, 0), ["v3*w1", "v4*w1"])]
- ["bench2", w2,
- ((1, 1), ["v21*w2", None]), ((2, 0), ["v22*w2", "v23*w2"])]
- ["Composite Benchmark", N/A,
- ((1, 1), ["v1*w1+v21*w2", None]),
- ((2, 0), ["v3*w1+v22*w2", "v4*w1+ v23*w2"])]]
- The returned table can then be processed further by other classes in this
- module.
+class SamplesTableGenerator(TableGenerator):
+ """Creates a table with only samples from the results
- Args:
- number_of_rows: Maximum number of rows to return from the table.
+ The main public function is called GetTable().
- Returns:
- A list of lists which is the table.
+ Unlike TableGenerator, self._runs is a dict of {benchmark: runs}.
+ Each run in `runs` is expected to contain a 'samples' key.
"""
- keys = self._GetKeys()
- header = [self._key_name, 'Weights'] + self._labels
- table = [header]
- rows = 0
- iterations = 0
-
- for k in keys:
- bench_runs = self._runs[k]
- unit = None
- all_runs_empty = all(not dict for label in bench_runs for dict in label)
- if all_runs_empty:
- cell = Cell()
- cell.string_value = ('Benchmark %s contains no result.'
- ' Is the benchmark name valid?' % k)
- table.append([cell])
- else:
- row = [k]
- row.append(self._weights[k])
- for run_list in bench_runs:
- run_pass = 0
- run_fail = 0
- v = []
- for run in run_list:
- if 'samples' in run:
- if isinstance(run['samples'], list):
- val = run['samples'][0] * self._weights[k]
- unit = run['samples'][1]
- else:
- val = run['samples'] * self._weights[k]
- v.append(val)
- run_pass += 1
+
+ def __init__(self, run_keyvals, label_list, iter_counts, weights):
+ TableGenerator.__init__(
+ self, run_keyvals, label_list, key_name="Benchmarks"
+ )
+ self._iter_counts = iter_counts
+ self._weights = weights
+
+ def _GetKeys(self):
+ keys = self._runs.keys()
+ return self._SortKeys(keys)
+
+ def GetTable(self, number_of_rows=sys.maxsize):
+ """Returns a tuple, which contains three args:
+
+ 1) a table from a list of list of dicts.
+ 2) updated benchmark_results run_keyvals with composite benchmark
+ 3) updated benchmark_results iter_count with composite benchmark
+
+ The dict of list of list of dicts is passed into the constructor of
+ SamplesTableGenerator.
+ This method converts that into a canonical list of lists which
+ represents a table of values.
+
+ Examples:
+ We have the following runs:
+ {bench1: [[{"samples": "v1"}, {"samples": "v2"}],
+ [{"samples": "v3"}, {"samples": "v4"}]]
+ bench2: [[{"samples": "v21"}, None],
+ [{"samples": "v22"}, {"samples": "v23"}]]}
+ and weights of benchmarks:
+ {bench1: w1, bench2: w2}
+ and the following labels:
+ ["vanilla", "modified"]
+ it will return:
+ [["Benchmark", "Weights", "vanilla", "modified"]
+ ["bench1", w1,
+ ((2, 0), ["v1*w1", "v2*w1"]), ((2, 0), ["v3*w1", "v4*w1"])]
+ ["bench2", w2,
+ ((1, 1), ["v21*w2", None]), ((2, 0), ["v22*w2", "v23*w2"])]
+ ["Composite Benchmark", N/A,
+ ((1, 1), ["v1*w1+v21*w2", None]),
+ ((2, 0), ["v3*w1+v22*w2", "v4*w1+ v23*w2"])]]
+ The returned table can then be processed further by other classes in this
+ module.
+
+ Args:
+ number_of_rows: Maximum number of rows to return from the table.
+
+ Returns:
+ A tuple of (table, run_keyvals, iter_counts), where table is a list of lists.
+ """
+ keys = self._GetKeys()
+ header = [self._key_name, "Weights"] + self._labels
+ table = [header]
+ rows = 0
+ iterations = 0
+
+ for k in keys:
+ bench_runs = self._runs[k]
+ unit = None
+ all_runs_empty = all(
+ not dict for label in bench_runs for dict in label
+ )
+ if all_runs_empty:
+ cell = Cell()
+ cell.string_value = (
+ "Benchmark %s contains no result."
+ " Is the benchmark name valid?" % k
+ )
+ table.append([cell])
else:
- v.append(None)
- run_fail += 1
- one_tuple = ((run_pass, run_fail), v)
- if iterations not in (0, run_pass + run_fail):
- raise ValueError('Iterations of each benchmark run ' \
- 'are not the same')
- iterations = run_pass + run_fail
- row.append(one_tuple)
- if unit:
- keyname = row[0] + ' (%s) ' % unit
- row[0] = keyname
- table.append(row)
- rows += 1
- if rows == number_of_rows:
- break
-
- k = 'Composite Benchmark'
- if k in keys:
- raise RuntimeError('Composite benchmark already exists in results')
-
- # Create a new composite benchmark row at the bottom of the summary table
- # The new row will be like the format in example:
- # ["Composite Benchmark", N/A,
- # ((1, 1), ["v1*w1+v21*w2", None]),
- # ((2, 0), ["v3*w1+v22*w2", "v4*w1+ v23*w2"])]]
- # First we will create a row of [key, weight, [[0] * iterations] * labels]
- row = [None] * len(header)
- row[0] = '%s (samples)' % k
- row[1] = 'N/A'
- for label_index in range(2, len(row)):
- row[label_index] = [0] * iterations
-
- for cur_row in table[1:]:
- # Iterate through each benchmark
- if len(cur_row) > 1:
- for label_index in range(2, len(cur_row)):
- # Iterate through each run in a single benchmark
- # each result should look like ((pass, fail), [values_list])
- bench_runs = cur_row[label_index][1]
- for index in range(iterations):
- # Accumulate each run result to composite benchmark run
- # If any run fails, then we set this run for composite benchmark
- # to None so that we know it fails.
- if bench_runs[index] and row[label_index][index] is not None:
- row[label_index][index] += bench_runs[index]
+ row = [k]
+ row.append(self._weights[k])
+ for run_list in bench_runs:
+ run_pass = 0
+ run_fail = 0
+ v = []
+ for run in run_list:
+ if "samples" in run:
+ if isinstance(run["samples"], list):
+ val = run["samples"][0] * self._weights[k]
+ unit = run["samples"][1]
+ else:
+ val = run["samples"] * self._weights[k]
+ v.append(val)
+ run_pass += 1
+ else:
+ v.append(None)
+ run_fail += 1
+ one_tuple = ((run_pass, run_fail), v)
+ if iterations not in (0, run_pass + run_fail):
+ raise ValueError(
+ "Iterations of each benchmark run "
+ "are not the same"
+ )
+ iterations = run_pass + run_fail
+ row.append(one_tuple)
+ if unit:
+ keyname = row[0] + " (%s) " % unit
+ row[0] = keyname
+ table.append(row)
+ rows += 1
+ if rows == number_of_rows:
+ break
+
+ k = "Composite Benchmark"
+ if k in keys:
+ raise RuntimeError("Composite benchmark already exists in results")
+
+ # Create a new composite benchmark row at the bottom of the summary table
+ # The new row will be like the format in example:
+ # ["Composite Benchmark", N/A,
+ # ((1, 1), ["v1*w1+v21*w2", None]),
+ # ((2, 0), ["v3*w1+v22*w2", "v4*w1+ v23*w2"])]]
+ # First we will create a row of [key, weight, [[0] * iterations] * labels]
+ row = [None] * len(header)
+ row[0] = "%s (samples)" % k
+ row[1] = "N/A"
+ for label_index in range(2, len(row)):
+ row[label_index] = [0] * iterations
+
+ for cur_row in table[1:]:
+ # Iterate through each benchmark
+ if len(cur_row) > 1:
+ for label_index in range(2, len(cur_row)):
+ # Iterate through each run in a single benchmark
+ # each result should look like ((pass, fail), [values_list])
+ bench_runs = cur_row[label_index][1]
+ for index in range(iterations):
+ # Accumulate each run result to composite benchmark run
+ # If any run fails, then we set this run for composite benchmark
+ # to None so that we know it fails.
+ if (
+ bench_runs[index]
+ and row[label_index][index] is not None
+ ):
+ row[label_index][index] += bench_runs[index]
+ else:
+ row[label_index][index] = None
else:
- row[label_index][index] = None
- else:
- # One benchmark totally fails, no valid data will be in final result
+ # One benchmark totally fails, no valid data will be in final result
+ for label_index in range(2, len(row)):
+ row[label_index] = [None] * iterations
+ break
+ # Calculate pass and fail count for composite benchmark
for label_index in range(2, len(row)):
- row[label_index] = [None] * iterations
- break
- # Calculate pass and fail count for composite benchmark
- for label_index in range(2, len(row)):
- run_pass = 0
- run_fail = 0
- for run in row[label_index]:
- if run:
- run_pass += 1
- else:
- run_fail += 1
- row[label_index] = ((run_pass, run_fail), row[label_index])
- table.append(row)
-
- # Now that we have the table genearted, we want to store this new composite
- # benchmark into the benchmark_result in ResultReport object.
- # This will be used to generate a full table which contains our composite
- # benchmark.
- # We need to create composite benchmark result and add it to keyvals in
- # benchmark_results.
- v = []
- for label in row[2:]:
- # each label's result looks like ((pass, fail), [values])
- benchmark_runs = label[1]
- # List of values of each label
- single_run_list = []
- for run in benchmark_runs:
- # Result of each run under the same label is a dict of keys.
- # Here the only key we will add for composite benchmark is the
- # weighted_samples we added up.
- one_dict = {}
- if run:
- one_dict[u'weighted_samples'] = [run, u'samples']
- one_dict['retval'] = 0
- else:
- one_dict['retval'] = 1
- single_run_list.append(one_dict)
- v.append(single_run_list)
-
- self._runs[k] = v
- self._iter_counts[k] = iterations
+ run_pass = 0
+ run_fail = 0
+ for run in row[label_index]:
+ if run:
+ run_pass += 1
+ else:
+ run_fail += 1
+ row[label_index] = ((run_pass, run_fail), row[label_index])
+ table.append(row)
- return (table, self._runs, self._iter_counts)
+ # Now that we have the table generated, we want to store this new composite
+ # benchmark into the benchmark_result in ResultReport object.
+ # This will be used to generate a full table which contains our composite
+ # benchmark.
+ # We need to create composite benchmark result and add it to keyvals in
+ # benchmark_results.
+ v = []
+ for label in row[2:]:
+ # each label's result looks like ((pass, fail), [values])
+ benchmark_runs = label[1]
+ # List of values of each label
+ single_run_list = []
+ for run in benchmark_runs:
+ # Result of each run under the same label is a dict of keys.
+ # Here the only key we will add for composite benchmark is the
+ # weighted_samples we added up.
+ one_dict = {}
+ if run:
+ one_dict[u"weighted_samples"] = [run, u"samples"]
+ one_dict["retval"] = 0
+ else:
+ one_dict["retval"] = 1
+ single_run_list.append(one_dict)
+ v.append(single_run_list)
+
+ self._runs[k] = v
+ self._iter_counts[k] = iterations
+
+ return (table, self._runs, self._iter_counts)
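For orientation, here is a minimal usage sketch of SamplesTableGenerator.GetTable() with hypothetical benchmark names, weights, and sample values (an empty dict stands in for a failed iteration); it is illustrative only and not part of the change:

    runs = {
        "bench1": [[{"samples": 10.0}, {"samples": 12.0}],
                   [{"samples": 11.0}, {"samples": 13.0}]],
        "bench2": [[{"samples": 5.0}, {}],   # empty dict == failed iteration
                   [{"samples": 6.0}, {"samples": 7.0}]],
    }
    weights = {"bench1": 0.7, "bench2": 0.3}
    iter_counts = {"bench1": 2, "bench2": 2}
    labels = ["vanilla", "modified"]

    tg = SamplesTableGenerator(runs, labels, iter_counts, weights)
    table, new_runs, new_iter_counts = tg.GetTable()
    # table[0] == ["Benchmarks", "Weights", "vanilla", "modified"]; the last row
    # is the synthesized "Composite Benchmark (samples)" row, and new_runs /
    # new_iter_counts now carry a "Composite Benchmark" entry as well.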
class Result(object):
- """A class that respresents a single result.
-
- This single result is obtained by condensing the information from a list of
- runs and a list of baseline runs.
- """
-
- def __init__(self):
- pass
-
- def _AllStringsSame(self, values):
- values_set = set(values)
- return len(values_set) == 1
-
- def NeedsBaseline(self):
- return False
-
- # pylint: disable=unused-argument
- def _Literal(self, cell, values, baseline_values):
- cell.value = ' '.join([str(v) for v in values])
-
- def _ComputeFloat(self, cell, values, baseline_values):
- self._Literal(cell, values, baseline_values)
-
- def _ComputeString(self, cell, values, baseline_values):
- self._Literal(cell, values, baseline_values)
-
- def _InvertIfLowerIsBetter(self, cell):
- pass
+ """A class that respresents a single result.
- def _GetGmean(self, values):
- if not values:
- return float('nan')
- if any([v < 0 for v in values]):
- return float('nan')
- if any([v == 0 for v in values]):
- return 0.0
- log_list = [math.log(v) for v in values]
- gmean_log = sum(log_list) / len(log_list)
- return math.exp(gmean_log)
-
- def Compute(self, cell, values, baseline_values):
- """Compute the result given a list of values and baseline values.
-
- Args:
- cell: A cell data structure to populate.
- values: List of values.
- baseline_values: List of baseline values. Can be none if this is the
- baseline itself.
+ This single result is obtained by condensing the information from a list of
+ runs and a list of baseline runs.
"""
- all_floats = True
- values = _StripNone(values)
- if not values:
- cell.value = ''
- return
- if _AllFloat(values):
- float_values = _GetFloats(values)
- else:
- all_floats = False
- if baseline_values:
- baseline_values = _StripNone(baseline_values)
- if baseline_values:
- if _AllFloat(baseline_values):
- float_baseline_values = _GetFloats(baseline_values)
- else:
- all_floats = False
- else:
- if self.NeedsBaseline():
- cell.value = ''
- return
- float_baseline_values = None
- if all_floats:
- self._ComputeFloat(cell, float_values, float_baseline_values)
- self._InvertIfLowerIsBetter(cell)
- else:
- self._ComputeString(cell, values, baseline_values)
+
+ def __init__(self):
+ pass
+
+ def _AllStringsSame(self, values):
+ values_set = set(values)
+ return len(values_set) == 1
+
+ def NeedsBaseline(self):
+ return False
+
+ # pylint: disable=unused-argument
+ def _Literal(self, cell, values, baseline_values):
+ cell.value = " ".join([str(v) for v in values])
+
+ def _ComputeFloat(self, cell, values, baseline_values):
+ self._Literal(cell, values, baseline_values)
+
+ def _ComputeString(self, cell, values, baseline_values):
+ self._Literal(cell, values, baseline_values)
+
+ def _InvertIfLowerIsBetter(self, cell):
+ pass
+
+ def _GetGmean(self, values):
+ if not values:
+ return float("nan")
+ if any([v < 0 for v in values]):
+ return float("nan")
+ if any([v == 0 for v in values]):
+ return 0.0
+ log_list = [math.log(v) for v in values]
+ gmean_log = sum(log_list) / len(log_list)
+ return math.exp(gmean_log)
+
+ def Compute(self, cell, values, baseline_values):
+ """Compute the result given a list of values and baseline values.
+
+ Args:
+ cell: A cell data structure to populate.
+ values: List of values.
+ baseline_values: List of baseline values. Can be None if this is the
+ baseline itself.
+ """
+ all_floats = True
+ values = _StripNone(values)
+ if not values:
+ cell.value = ""
+ return
+ if _AllFloat(values):
+ float_values = _GetFloats(values)
+ else:
+ all_floats = False
+ if baseline_values:
+ baseline_values = _StripNone(baseline_values)
+ if baseline_values:
+ if _AllFloat(baseline_values):
+ float_baseline_values = _GetFloats(baseline_values)
+ else:
+ all_floats = False
+ else:
+ if self.NeedsBaseline():
+ cell.value = ""
+ return
+ float_baseline_values = None
+ if all_floats:
+ self._ComputeFloat(cell, float_values, float_baseline_values)
+ self._InvertIfLowerIsBetter(cell)
+ else:
+ self._ComputeString(cell, values, baseline_values)
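As a quick sketch of how a Result subclass is driven (using the Cell and AmeanResult classes defined elsewhere in this file; the values are made up):

    cell = Cell()
    AmeanResult().Compute(cell, [1.0, 2.0, 3.0], baseline_values=None)
    # cell.value == 2.0, the arithmetic mean of the stripped (non-None) values.

    # _GetGmean() is the geometric mean; e.g. for [2.0, 8.0] it returns
    # exp((ln 2 + ln 8) / 2) == 4.0.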
class LiteralResult(Result):
- """A literal result."""
+ """A literal result."""
- def __init__(self, iteration=0):
- super(LiteralResult, self).__init__()
- self.iteration = iteration
+ def __init__(self, iteration=0):
+ super(LiteralResult, self).__init__()
+ self.iteration = iteration
- def Compute(self, cell, values, baseline_values):
- try:
- cell.value = values[self.iteration]
- except IndexError:
- cell.value = '-'
+ def Compute(self, cell, values, baseline_values):
+ try:
+ cell.value = values[self.iteration]
+ except IndexError:
+ cell.value = "-"
class NonEmptyCountResult(Result):
- """A class that counts the number of non-empty results.
-
- The number of non-empty values will be stored in the cell.
- """
+ """A class that counts the number of non-empty results.
- def Compute(self, cell, values, baseline_values):
- """Put the number of non-empty values in the cell result.
-
- Args:
- cell: Put the result in cell.value.
- values: A list of values for the row.
- baseline_values: A list of baseline values for the row.
+ The number of non-empty values will be stored in the cell.
"""
- cell.value = len(_StripNone(values))
- if not baseline_values:
- return
- base_value = len(_StripNone(baseline_values))
- if cell.value == base_value:
- return
- f = ColorBoxFormat()
- len_values = len(values)
- len_baseline_values = len(baseline_values)
- tmp_cell = Cell()
- tmp_cell.value = 1.0 + (
- float(cell.value - base_value) / (max(len_values, len_baseline_values)))
- f.Compute(tmp_cell)
- cell.bgcolor = tmp_cell.bgcolor
+
+ def Compute(self, cell, values, baseline_values):
+ """Put the number of non-empty values in the cell result.
+
+ Args:
+ cell: Put the result in cell.value.
+ values: A list of values for the row.
+ baseline_values: A list of baseline values for the row.
+ """
+ cell.value = len(_StripNone(values))
+ if not baseline_values:
+ return
+ base_value = len(_StripNone(baseline_values))
+ if cell.value == base_value:
+ return
+ f = ColorBoxFormat()
+ len_values = len(values)
+ len_baseline_values = len(baseline_values)
+ tmp_cell = Cell()
+ tmp_cell.value = 1.0 + (
+ float(cell.value - base_value)
+ / (max(len_values, len_baseline_values))
+ )
+ f.Compute(tmp_cell)
+ cell.bgcolor = tmp_cell.bgcolor
class StringMeanResult(Result):
- """Mean of string values."""
+ """Mean of string values."""
- def _ComputeString(self, cell, values, baseline_values):
- if self._AllStringsSame(values):
- cell.value = str(values[0])
- else:
- cell.value = '?'
+ def _ComputeString(self, cell, values, baseline_values):
+ if self._AllStringsSame(values):
+ cell.value = str(values[0])
+ else:
+ cell.value = "?"
class AmeanResult(StringMeanResult):
- """Arithmetic mean."""
+ """Arithmetic mean."""
- def __init__(self, ignore_min_max=False):
- super(AmeanResult, self).__init__()
- self.ignore_min_max = ignore_min_max
+ def __init__(self, ignore_min_max=False):
+ super(AmeanResult, self).__init__()
+ self.ignore_min_max = ignore_min_max
- def _ComputeFloat(self, cell, values, baseline_values):
- if self.ignore_min_max:
- values = _RemoveMinMax(cell, values)
- cell.value = statistics.mean(values)
+ def _ComputeFloat(self, cell, values, baseline_values):
+ if self.ignore_min_max:
+ values = _RemoveMinMax(cell, values)
+ cell.value = statistics.mean(values)
class RawResult(Result):
- """Raw result."""
+ """Raw result."""
class IterationResult(Result):
- """Iteration result."""
+ """Iteration result."""
class MinResult(Result):
- """Minimum."""
+ """Minimum."""
- def _ComputeFloat(self, cell, values, baseline_values):
- cell.value = min(values)
+ def _ComputeFloat(self, cell, values, baseline_values):
+ cell.value = min(values)
- def _ComputeString(self, cell, values, baseline_values):
- if values:
- cell.value = min(values)
- else:
- cell.value = ''
+ def _ComputeString(self, cell, values, baseline_values):
+ if values:
+ cell.value = min(values)
+ else:
+ cell.value = ""
class MaxResult(Result):
- """Maximum."""
+ """Maximum."""
- def _ComputeFloat(self, cell, values, baseline_values):
- cell.value = max(values)
+ def _ComputeFloat(self, cell, values, baseline_values):
+ cell.value = max(values)
- def _ComputeString(self, cell, values, baseline_values):
- if values:
- cell.value = max(values)
- else:
- cell.value = ''
+ def _ComputeString(self, cell, values, baseline_values):
+ if values:
+ cell.value = max(values)
+ else:
+ cell.value = ""
class NumericalResult(Result):
- """Numerical result."""
+ """Numerical result."""
- def _ComputeString(self, cell, values, baseline_values):
- cell.value = '?'
+ def _ComputeString(self, cell, values, baseline_values):
+ cell.value = "?"
class StdResult(NumericalResult):
- """Standard deviation."""
+ """Standard deviation."""
- def __init__(self, ignore_min_max=False):
- super(StdResult, self).__init__()
- self.ignore_min_max = ignore_min_max
+ def __init__(self, ignore_min_max=False):
+ super(StdResult, self).__init__()
+ self.ignore_min_max = ignore_min_max
- def _ComputeFloat(self, cell, values, baseline_values):
- if self.ignore_min_max:
- values = _RemoveMinMax(cell, values)
- cell.value = statistics.pstdev(values)
+ def _ComputeFloat(self, cell, values, baseline_values):
+ if self.ignore_min_max:
+ values = _RemoveMinMax(cell, values)
+ cell.value = statistics.pstdev(values)
class CoeffVarResult(NumericalResult):
- """Standard deviation / Mean"""
+ """Standard deviation / Mean"""
- def __init__(self, ignore_min_max=False):
- super(CoeffVarResult, self).__init__()
- self.ignore_min_max = ignore_min_max
+ def __init__(self, ignore_min_max=False):
+ super(CoeffVarResult, self).__init__()
+ self.ignore_min_max = ignore_min_max
- def _ComputeFloat(self, cell, values, baseline_values):
- if self.ignore_min_max:
- values = _RemoveMinMax(cell, values)
- if statistics.mean(values) != 0.0:
- noise = abs(statistics.pstdev(values) / statistics.mean(values))
- else:
- noise = 0.0
- cell.value = noise
+ def _ComputeFloat(self, cell, values, baseline_values):
+ if self.ignore_min_max:
+ values = _RemoveMinMax(cell, values)
+ if statistics.mean(values) != 0.0:
+ noise = abs(statistics.pstdev(values) / statistics.mean(values))
+ else:
+ noise = 0.0
+ cell.value = noise
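A worked example of the coefficient of variation computed above (hypothetical values): for [4.0, 6.0] the mean is 5.0 and the population standard deviation is 1.0, so the reported noise is 0.2 (20%).

    cell = Cell()
    CoeffVarResult().Compute(cell, [4.0, 6.0], baseline_values=None)
    # cell.value == 0.2  (abs(pstdev / mean) == 1.0 / 5.0)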
class ComparisonResult(Result):
- """Same or Different."""
-
- def NeedsBaseline(self):
- return True
-
- def _ComputeString(self, cell, values, baseline_values):
- value = None
- baseline_value = None
- if self._AllStringsSame(values):
- value = values[0]
- if self._AllStringsSame(baseline_values):
- baseline_value = baseline_values[0]
- if value is not None and baseline_value is not None:
- if value == baseline_value:
- cell.value = 'SAME'
- else:
- cell.value = 'DIFFERENT'
- else:
- cell.value = '?'
+ """Same or Different."""
+
+ def NeedsBaseline(self):
+ return True
+
+ def _ComputeString(self, cell, values, baseline_values):
+ value = None
+ baseline_value = None
+ if self._AllStringsSame(values):
+ value = values[0]
+ if self._AllStringsSame(baseline_values):
+ baseline_value = baseline_values[0]
+ if value is not None and baseline_value is not None:
+ if value == baseline_value:
+ cell.value = "SAME"
+ else:
+ cell.value = "DIFFERENT"
+ else:
+ cell.value = "?"
class PValueResult(ComparisonResult):
- """P-value."""
+ """P-value."""
- def __init__(self, ignore_min_max=False):
- super(PValueResult, self).__init__()
- self.ignore_min_max = ignore_min_max
+ def __init__(self, ignore_min_max=False):
+ super(PValueResult, self).__init__()
+ self.ignore_min_max = ignore_min_max
- def _ComputeFloat(self, cell, values, baseline_values):
- if self.ignore_min_max:
- values = _RemoveMinMax(cell, values)
- baseline_values = _RemoveMinMax(cell, baseline_values)
- if len(values) < 2 or len(baseline_values) < 2:
- cell.value = float('nan')
- return
- _, cell.value = scipy.stats.ttest_ind(values, baseline_values)
+ def _ComputeFloat(self, cell, values, baseline_values):
+ if self.ignore_min_max:
+ values = _RemoveMinMax(cell, values)
+ baseline_values = _RemoveMinMax(cell, baseline_values)
+ if len(values) < 2 or len(baseline_values) < 2:
+ cell.value = float("nan")
+ return
+ _, cell.value = scipy.stats.ttest_ind(values, baseline_values)
- def _ComputeString(self, cell, values, baseline_values):
- return float('nan')
+ def _ComputeString(self, cell, values, baseline_values):
+ return float("nan")
class KeyAwareComparisonResult(ComparisonResult):
- """Automatic key aware comparison."""
-
- def _IsLowerBetter(self, key):
- # Units in histograms should include directions
- if 'smallerIsBetter' in key:
- return True
- if 'biggerIsBetter' in key:
- return False
-
- # For units in chartjson:
- # TODO(llozano): Trying to guess direction by looking at the name of the
- # test does not seem like a good idea. Test frameworks should provide this
- # info explicitly. I believe Telemetry has this info. Need to find it out.
- #
- # Below are some test names for which we are not sure what the
- # direction is.
- #
- # For these we dont know what the direction is. But, since we dont
- # specify anything, crosperf will assume higher is better:
- # --percent_impl_scrolled--percent_impl_scrolled--percent
- # --solid_color_tiles_analyzed--solid_color_tiles_analyzed--count
- # --total_image_cache_hit_count--total_image_cache_hit_count--count
- # --total_texture_upload_time_by_url
- #
- # About these we are doubtful but we made a guess:
- # --average_num_missing_tiles_by_url--*--units (low is good)
- # --experimental_mean_frame_time_by_url--*--units (low is good)
- # --experimental_median_frame_time_by_url--*--units (low is good)
- # --texture_upload_count--texture_upload_count--count (high is good)
- # --total_deferred_image_decode_count--count (low is good)
- # --total_tiles_analyzed--total_tiles_analyzed--count (high is good)
- lower_is_better_keys = [
- 'milliseconds', 'ms_', 'seconds_', 'KB', 'rdbytes', 'wrbytes',
- 'dropped_percent', '(ms)', '(seconds)', '--ms',
- '--average_num_missing_tiles', '--experimental_jank',
- '--experimental_mean_frame', '--experimental_median_frame_time',
- '--total_deferred_image_decode_count', '--seconds', 'samples', 'bytes'
- ]
-
- return any([l in key for l in lower_is_better_keys])
-
- def _InvertIfLowerIsBetter(self, cell):
- if self._IsLowerBetter(cell.name):
- if cell.value:
- cell.value = 1.0 / cell.value
+ """Automatic key aware comparison."""
+
+ def _IsLowerBetter(self, key):
+ # Units in histograms should include directions
+ if "smallerIsBetter" in key:
+ return True
+ if "biggerIsBetter" in key:
+ return False
+
+ # For units in chartjson:
+ # TODO(llozano): Trying to guess direction by looking at the name of the
+ # test does not seem like a good idea. Test frameworks should provide this
+ # info explicitly. I believe Telemetry has this info. Need to find it out.
+ #
+ # Below are some test names for which we are not sure what the
+ # direction is.
+ #
+ # For these we don't know what the direction is. But, since we don't
+ # specify anything, crosperf will assume higher is better:
+ # --percent_impl_scrolled--percent_impl_scrolled--percent
+ # --solid_color_tiles_analyzed--solid_color_tiles_analyzed--count
+ # --total_image_cache_hit_count--total_image_cache_hit_count--count
+ # --total_texture_upload_time_by_url
+ #
+ # About these we are doubtful but we made a guess:
+ # --average_num_missing_tiles_by_url--*--units (low is good)
+ # --experimental_mean_frame_time_by_url--*--units (low is good)
+ # --experimental_median_frame_time_by_url--*--units (low is good)
+ # --texture_upload_count--texture_upload_count--count (high is good)
+ # --total_deferred_image_decode_count--count (low is good)
+ # --total_tiles_analyzed--total_tiles_analyzed--count (high is good)
+ lower_is_better_keys = [
+ "milliseconds",
+ "ms_",
+ "seconds_",
+ "KB",
+ "rdbytes",
+ "wrbytes",
+ "dropped_percent",
+ "(ms)",
+ "(seconds)",
+ "--ms",
+ "--average_num_missing_tiles",
+ "--experimental_jank",
+ "--experimental_mean_frame",
+ "--experimental_median_frame_time",
+ "--total_deferred_image_decode_count",
+ "--seconds",
+ "samples",
+ "bytes",
+ ]
+
+ return any([l in key for l in lower_is_better_keys])
+
+ def _InvertIfLowerIsBetter(self, cell):
+ if self._IsLowerBetter(cell.name):
+ if cell.value:
+ cell.value = 1.0 / cell.value
class AmeanRatioResult(KeyAwareComparisonResult):
- """Ratio of arithmetic means of values vs. baseline values."""
-
- def __init__(self, ignore_min_max=False):
- super(AmeanRatioResult, self).__init__()
- self.ignore_min_max = ignore_min_max
-
- def _ComputeFloat(self, cell, values, baseline_values):
- if self.ignore_min_max:
- values = _RemoveMinMax(cell, values)
- baseline_values = _RemoveMinMax(cell, baseline_values)
-
- baseline_mean = statistics.mean(baseline_values)
- values_mean = statistics.mean(values)
- if baseline_mean != 0:
- cell.value = values_mean / baseline_mean
- elif values_mean != 0:
- cell.value = 0.00
- # cell.value = 0 means the values and baseline_values have big difference
- else:
- cell.value = 1.00
- # no difference if both values and baseline_values are 0
+ """Ratio of arithmetic means of values vs. baseline values."""
+
+ def __init__(self, ignore_min_max=False):
+ super(AmeanRatioResult, self).__init__()
+ self.ignore_min_max = ignore_min_max
+
+ def _ComputeFloat(self, cell, values, baseline_values):
+ if self.ignore_min_max:
+ values = _RemoveMinMax(cell, values)
+ baseline_values = _RemoveMinMax(cell, baseline_values)
+
+ baseline_mean = statistics.mean(baseline_values)
+ values_mean = statistics.mean(values)
+ if baseline_mean != 0:
+ cell.value = values_mean / baseline_mean
+ elif values_mean != 0:
+ cell.value = 0.00
+ # cell.value = 0 means the values and baseline_values have big difference
+ else:
+ cell.value = 1.00
+ # no difference if both values and baseline_values are 0
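To illustrate the key-aware inversion, a small sketch (the key name is hypothetical; anything matching a lower-is-better hint such as "(ms)" gets flipped so that values above 1.0 always read as improvements):

    cell = Cell()
    cell.name = "page_load_time (ms)"   # hypothetical lower-is-better key
    AmeanRatioResult().Compute(cell, [50.0], [100.0])
    # The raw mean ratio is 0.5, but _InvertIfLowerIsBetter turns it into 2.0.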
class GmeanRatioResult(KeyAwareComparisonResult):
- """Ratio of geometric means of values vs. baseline values."""
-
- def __init__(self, ignore_min_max=False):
- super(GmeanRatioResult, self).__init__()
- self.ignore_min_max = ignore_min_max
-
- def _ComputeFloat(self, cell, values, baseline_values):
- if self.ignore_min_max:
- values = _RemoveMinMax(cell, values)
- baseline_values = _RemoveMinMax(cell, baseline_values)
- if self._GetGmean(baseline_values) != 0:
- cell.value = self._GetGmean(values) / self._GetGmean(baseline_values)
- elif self._GetGmean(values) != 0:
- cell.value = 0.00
- else:
- cell.value = 1.00
+ """Ratio of geometric means of values vs. baseline values."""
+
+ def __init__(self, ignore_min_max=False):
+ super(GmeanRatioResult, self).__init__()
+ self.ignore_min_max = ignore_min_max
+
+ def _ComputeFloat(self, cell, values, baseline_values):
+ if self.ignore_min_max:
+ values = _RemoveMinMax(cell, values)
+ baseline_values = _RemoveMinMax(cell, baseline_values)
+ if self._GetGmean(baseline_values) != 0:
+ cell.value = self._GetGmean(values) / self._GetGmean(
+ baseline_values
+ )
+ elif self._GetGmean(values) != 0:
+ cell.value = 0.00
+ else:
+ cell.value = 1.00
class Color(object):
- """Class that represents color in RGBA format."""
-
- def __init__(self, r=0, g=0, b=0, a=0):
- self.r = r
- self.g = g
- self.b = b
- self.a = a
-
- def __str__(self):
- return 'r: %s g: %s: b: %s: a: %s' % (self.r, self.g, self.b, self.a)
+ """Class that represents color in RGBA format."""
+
+ def __init__(self, r=0, g=0, b=0, a=0):
+ self.r = r
+ self.g = g
+ self.b = b
+ self.a = a
+
+ def __str__(self):
+ return "r: %s g: %s: b: %s: a: %s" % (self.r, self.g, self.b, self.a)
+
+ def Round(self):
+ """Round RGBA values to the nearest integer."""
+ self.r = int(self.r)
+ self.g = int(self.g)
+ self.b = int(self.b)
+ self.a = int(self.a)
+
+ def GetRGB(self):
+ """Get a hex representation of the color."""
+ return "%02x%02x%02x" % (self.r, self.g, self.b)
+
+ @classmethod
+ def Lerp(cls, ratio, a, b):
+ """Perform linear interpolation between two colors.
+
+ Args:
+ ratio: The ratio to use for linear interpolation.
+ a: The first color object (used when ratio is 0).
+ b: The second color object (used when ratio is 1).
+
+ Returns:
+ Linearly interpolated color.
+ """
+ ret = cls()
+ ret.r = (b.r - a.r) * ratio + a.r
+ ret.g = (b.g - a.g) * ratio + a.g
+ ret.b = (b.b - a.b) * ratio + a.b
+ ret.a = (b.a - a.a) * ratio + a.a
+ return ret
- def Round(self):
- """Round RGBA values to the nearest integer."""
- self.r = int(self.r)
- self.g = int(self.g)
- self.b = int(self.b)
- self.a = int(self.a)
- def GetRGB(self):
- """Get a hex representation of the color."""
- return '%02x%02x%02x' % (self.r, self.g, self.b)
+class Format(object):
+ """A class that represents the format of a column."""
- @classmethod
- def Lerp(cls, ratio, a, b):
- """Perform linear interpolation between two colors.
+ def __init__(self):
+ pass
- Args:
- ratio: The ratio to use for linear polation.
- a: The first color object (used when ratio is 0).
- b: The second color object (used when ratio is 1).
+ def Compute(self, cell):
+ """Computes the attributes of a cell based on its value.
- Returns:
- Linearly interpolated color.
- """
- ret = cls()
- ret.r = (b.r - a.r) * ratio + a.r
- ret.g = (b.g - a.g) * ratio + a.g
- ret.b = (b.b - a.b) * ratio + a.b
- ret.a = (b.a - a.a) * ratio + a.a
- return ret
+ Attributes typically are color, width, etc.
+ Args:
+ cell: The cell whose attributes are to be populated.
+ """
+ if cell.value is None:
+ cell.string_value = ""
+ if isinstance(cell.value, float):
+ self._ComputeFloat(cell)
+ else:
+ self._ComputeString(cell)
-class Format(object):
- """A class that represents the format of a column."""
+ def _ComputeFloat(self, cell):
+ cell.string_value = "{0:.2f}".format(cell.value)
- def __init__(self):
- pass
+ def _ComputeString(self, cell):
+ cell.string_value = str(cell.value)
- def Compute(self, cell):
- """Computes the attributes of a cell based on its value.
+ def _GetColor(self, value, low, mid, high, power=6, mid_value=1.0):
+ min_value = 0.0
+ max_value = 2.0
+ if math.isnan(value):
+ return mid
+ if value > mid_value:
+ value = max_value - mid_value / value
- Attributes typically are color, width, etc.
+ return self._GetColorBetweenRange(
+ value, min_value, mid_value, max_value, low, mid, high, power
+ )
- Args:
- cell: The cell whose attributes are to be populated.
- """
- if cell.value is None:
- cell.string_value = ''
- if isinstance(cell.value, float):
- self._ComputeFloat(cell)
- else:
- self._ComputeString(cell)
-
- def _ComputeFloat(self, cell):
- cell.string_value = '{0:.2f}'.format(cell.value)
-
- def _ComputeString(self, cell):
- cell.string_value = str(cell.value)
-
- def _GetColor(self, value, low, mid, high, power=6, mid_value=1.0):
- min_value = 0.0
- max_value = 2.0
- if math.isnan(value):
- return mid
- if value > mid_value:
- value = max_value - mid_value / value
-
- return self._GetColorBetweenRange(value, min_value, mid_value, max_value,
- low, mid, high, power)
-
- def _GetColorBetweenRange(self, value, min_value, mid_value, max_value,
- low_color, mid_color, high_color, power):
- assert value <= max_value
- assert value >= min_value
- if value > mid_value:
- value = (max_value - value) / (max_value - mid_value)
- value **= power
- ret = Color.Lerp(value, high_color, mid_color)
- else:
- value = (value - min_value) / (mid_value - min_value)
- value **= power
- ret = Color.Lerp(value, low_color, mid_color)
- ret.Round()
- return ret
+ def _GetColorBetweenRange(
+ self,
+ value,
+ min_value,
+ mid_value,
+ max_value,
+ low_color,
+ mid_color,
+ high_color,
+ power,
+ ):
+ assert value <= max_value
+ assert value >= min_value
+ if value > mid_value:
+ value = (max_value - value) / (max_value - mid_value)
+ value **= power
+ ret = Color.Lerp(value, high_color, mid_color)
+ else:
+ value = (value - min_value) / (mid_value - min_value)
+ value **= power
+ ret = Color.Lerp(value, low_color, mid_color)
+ ret.Round()
+ return ret
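For intuition about the ratio-to-color mapping implemented by _GetColor()/_GetColorBetweenRange(), a short worked sketch (assuming the Color class above and the default power=6; these are made-up values):

    fmt = Format()
    red, white, green = Color(255, 0, 0, 0), Color(255, 255, 255, 0), Color(0, 255, 0, 0)

    fmt._GetColor(1.0, red, white, green).GetRGB()   # == "ffffff": 1.0 is the mid point
    # A value of 2.0 is first remapped to 2.0 - 1.0/2.0 = 1.5, normalized to
    # (2.0 - 1.5) / (2.0 - 1.0) = 0.5, raised to power 6 (~0.016), and then
    # lerped, which lands almost exactly on the high (green) color.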
class PValueFormat(Format):
- """Formatting for p-value."""
+ """Formatting for p-value."""
- def _ComputeFloat(self, cell):
- cell.string_value = '%0.2f' % float(cell.value)
- if float(cell.value) < 0.05:
- cell.bgcolor = self._GetColor(
- cell.value,
- Color(255, 255, 0, 0),
- Color(255, 255, 255, 0),
- Color(255, 255, 255, 0),
- mid_value=0.05,
- power=1)
+ def _ComputeFloat(self, cell):
+ cell.string_value = "%0.2f" % float(cell.value)
+ if float(cell.value) < 0.05:
+ cell.bgcolor = self._GetColor(
+ cell.value,
+ Color(255, 255, 0, 0),
+ Color(255, 255, 255, 0),
+ Color(255, 255, 255, 0),
+ mid_value=0.05,
+ power=1,
+ )
class WeightFormat(Format):
- """Formatting for weight in cwp mode."""
+ """Formatting for weight in cwp mode."""
- def _ComputeFloat(self, cell):
- cell.string_value = '%0.4f' % float(cell.value)
+ def _ComputeFloat(self, cell):
+ cell.string_value = "%0.4f" % float(cell.value)
class StorageFormat(Format):
- """Format the cell as a storage number.
+ """Format the cell as a storage number.
- Examples:
- If the cell contains a value of 1024, the string_value will be 1.0K.
- """
-
- def _ComputeFloat(self, cell):
- base = 1024
- suffices = ['K', 'M', 'G']
- v = float(cell.value)
- current = 0
- while v >= base**(current + 1) and current < len(suffices):
- current += 1
+ Examples:
+ If the cell contains a value of 1024, the string_value will be 1.0K.
+ """
- if current:
- divisor = base**current
- cell.string_value = '%1.1f%s' % ((v / divisor), suffices[current - 1])
- else:
- cell.string_value = str(cell.value)
+ def _ComputeFloat(self, cell):
+ base = 1024
+ suffices = ["K", "M", "G"]
+ v = float(cell.value)
+ current = 0
+ while v >= base ** (current + 1) and current < len(suffices):
+ current += 1
+
+ if current:
+ divisor = base ** current
+ cell.string_value = "%1.1f%s" % (
+ (v / divisor),
+ suffices[current - 1],
+ )
+ else:
+ cell.string_value = str(cell.value)
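A quick worked example of the storage formatting above (using the Cell class defined later in this file; the value is made up): 1536 is at least 1024**1 but less than 1024**2, so it is shown with the 'K' suffix.

    cell = Cell()
    cell.value = 1536.0
    StorageFormat().Compute(cell)
    # cell.string_value == "1.5K"; a value of 1024.0**2 would render as "1.0M".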
class CoeffVarFormat(Format):
- """Format the cell as a percent.
+ """Format the cell as a percent.
- Examples:
- If the cell contains a value of 1.5, the string_value will be +150%.
- """
+ Examples:
+ If the cell contains a value of 1.5, the string_value will be +150%.
+ """
- def _ComputeFloat(self, cell):
- cell.string_value = '%1.1f%%' % (float(cell.value) * 100)
- cell.color = self._GetColor(
- cell.value,
- Color(0, 255, 0, 0),
- Color(0, 0, 0, 0),
- Color(255, 0, 0, 0),
- mid_value=0.02,
- power=1)
+ def _ComputeFloat(self, cell):
+ cell.string_value = "%1.1f%%" % (float(cell.value) * 100)
+ cell.color = self._GetColor(
+ cell.value,
+ Color(0, 255, 0, 0),
+ Color(0, 0, 0, 0),
+ Color(255, 0, 0, 0),
+ mid_value=0.02,
+ power=1,
+ )
class PercentFormat(Format):
- """Format the cell as a percent.
+ """Format the cell as a percent.
- Examples:
- If the cell contains a value of 1.5, the string_value will be +50%.
- """
+ Examples:
+ If the cell contains a value of 1.5, the string_value will be +50%.
+ """
- def _ComputeFloat(self, cell):
- cell.string_value = '%+1.1f%%' % ((float(cell.value) - 1) * 100)
- cell.color = self._GetColor(cell.value, Color(255, 0, 0, 0),
- Color(0, 0, 0, 0), Color(0, 255, 0, 0))
+ def _ComputeFloat(self, cell):
+ cell.string_value = "%+1.1f%%" % ((float(cell.value) - 1) * 100)
+ cell.color = self._GetColor(
+ cell.value,
+ Color(255, 0, 0, 0),
+ Color(0, 0, 0, 0),
+ Color(0, 255, 0, 0),
+ )
class RatioFormat(Format):
- """Format the cell as a ratio.
+ """Format the cell as a ratio.
- Examples:
- If the cell contains a value of 1.5642, the string_value will be 1.56.
- """
+ Examples:
+ If the cell contains a value of 1.5642, the string_value will be 1.56.
+ """
- def _ComputeFloat(self, cell):
- cell.string_value = '%+1.1f%%' % ((cell.value - 1) * 100)
- cell.color = self._GetColor(cell.value, Color(255, 0, 0, 0),
- Color(0, 0, 0, 0), Color(0, 255, 0, 0))
+ def _ComputeFloat(self, cell):
+ cell.string_value = "%+1.1f%%" % ((cell.value - 1) * 100)
+ cell.color = self._GetColor(
+ cell.value,
+ Color(255, 0, 0, 0),
+ Color(0, 0, 0, 0),
+ Color(0, 255, 0, 0),
+ )
class ColorBoxFormat(Format):
- """Format the cell as a color box.
+ """Format the cell as a color box.
- Examples:
- If the cell contains a value of 1.5, it will get a green color.
- If the cell contains a value of 0.5, it will get a red color.
- The intensity of the green/red will be determined by how much above or below
- 1.0 the value is.
- """
+ Examples:
+ If the cell contains a value of 1.5, it will get a green color.
+ If the cell contains a value of 0.5, it will get a red color.
+ The intensity of the green/red will be determined by how much above or below
+ 1.0 the value is.
+ """
- def _ComputeFloat(self, cell):
- cell.string_value = '--'
- bgcolor = self._GetColor(cell.value, Color(255, 0, 0, 0),
- Color(255, 255, 255, 0), Color(0, 255, 0, 0))
- cell.bgcolor = bgcolor
- cell.color = bgcolor
+ def _ComputeFloat(self, cell):
+ cell.string_value = "--"
+ bgcolor = self._GetColor(
+ cell.value,
+ Color(255, 0, 0, 0),
+ Color(255, 255, 255, 0),
+ Color(0, 255, 0, 0),
+ )
+ cell.bgcolor = bgcolor
+ cell.color = bgcolor
class Cell(object):
- """A class to represent a cell in a table.
-
- Attributes:
- value: The raw value of the cell.
- color: The color of the cell.
- bgcolor: The background color of the cell.
- string_value: The string value of the cell.
- suffix: A string suffix to be attached to the value when displaying.
- prefix: A string prefix to be attached to the value when displaying.
- color_row: Indicates whether the whole row is to inherit this cell's color.
- bgcolor_row: Indicates whether the whole row is to inherit this cell's
- bgcolor.
- width: Optional specifier to make a column narrower than the usual width.
- The usual width of a column is the max of all its cells widths.
- colspan: Set the colspan of the cell in the HTML table, this is used for
- table headers. Default value is 1.
- name: the test name of the cell.
- header: Whether this is a header in html.
- """
-
- def __init__(self):
- self.value = None
- self.color = None
- self.bgcolor = None
- self.string_value = None
- self.suffix = None
- self.prefix = None
- # Entire row inherits this color.
- self.color_row = False
- self.bgcolor_row = False
- self.width = 0
- self.colspan = 1
- self.name = None
- self.header = False
-
- def __str__(self):
- l = []
- l.append('value: %s' % self.value)
- l.append('string_value: %s' % self.string_value)
- return ' '.join(l)
+ """A class to represent a cell in a table.
+
+ Attributes:
+ value: The raw value of the cell.
+ color: The color of the cell.
+ bgcolor: The background color of the cell.
+ string_value: The string value of the cell.
+ suffix: A string suffix to be attached to the value when displaying.
+ prefix: A string prefix to be attached to the value when displaying.
+ color_row: Indicates whether the whole row is to inherit this cell's color.
+ bgcolor_row: Indicates whether the whole row is to inherit this cell's
+ bgcolor.
+ width: Optional specifier to make a column narrower than the usual width.
+ The usual width of a column is the max of all its cells widths.
+ colspan: Set the colspan of the cell in the HTML table, this is used for
+ table headers. Default value is 1.
+ name: the test name of the cell.
+ header: Whether this is a header in html.
+ """
+
+ def __init__(self):
+ self.value = None
+ self.color = None
+ self.bgcolor = None
+ self.string_value = None
+ self.suffix = None
+ self.prefix = None
+ # Entire row inherits this color.
+ self.color_row = False
+ self.bgcolor_row = False
+ self.width = 0
+ self.colspan = 1
+ self.name = None
+ self.header = False
+
+ def __str__(self):
+ l = []
+ l.append("value: %s" % self.value)
+ l.append("string_value: %s" % self.string_value)
+ return " ".join(l)
class Column(object):
- """Class representing a column in a table.
+ """Class representing a column in a table.
- Attributes:
- result: an object of the Result class.
- fmt: an object of the Format class.
- """
+ Attributes:
+ result: an object of the Result class.
+ fmt: an object of the Format class.
+ """
- def __init__(self, result, fmt, name=''):
- self.result = result
- self.fmt = fmt
- self.name = name
+ def __init__(self, result, fmt, name=""):
+ self.result = result
+ self.fmt = fmt
+ self.name = name
# Takes in:
@@ -1033,536 +1091,561 @@ class Column(object):
# ["k", avg("v", "v2"), stddev("v", "v2"), etc.]]
# according to format string
class TableFormatter(object):
- """Class to convert a plain table into a cell-table.
+ """Class to convert a plain table into a cell-table.
- This class takes in a table generated by TableGenerator and a list of column
- formats to apply to the table and returns a table of cells.
- """
+ This class takes in a table generated by TableGenerator and a list of column
+ formats to apply to the table and returns a table of cells.
+ """
- def __init__(self, table, columns, samples_table=False):
- """The constructor takes in a table and a list of columns.
+ def __init__(self, table, columns, samples_table=False):
+ """The constructor takes in a table and a list of columns.
+
+ Args:
+ table: A list of lists of values.
+ columns: A list of Column objects specifying what to produce and how to
+ format it.
+ samples_table: A flag indicating whether we are generating a table of
+ samples in CWP approximation mode.
+ """
+ self._table = table
+ self._columns = columns
+ self._samples_table = samples_table
+ self._table_columns = []
+ self._out_table = []
+
+ def GenerateCellTable(self, table_type):
+ row_index = 0
+ all_failed = False
+
+ for row in self._table[1:]:
+ # If we are generating samples_table, the second value will be weight
+ # rather than values.
+ start_col = 2 if self._samples_table else 1
+ # It does not make sense to put retval in the summary table.
+ if str(row[0]) == "retval" and table_type == "summary":
+ # Check to see if any runs passed, and update all_failed.
+ all_failed = True
+ for values in row[start_col:]:
+ if 0 in values:
+ all_failed = False
+ continue
+ key = Cell()
+ key.string_value = str(row[0])
+ out_row = [key]
+ if self._samples_table:
+ # Add one column for weight if in samples_table mode
+ weight = Cell()
+ weight.value = row[1]
+ f = WeightFormat()
+ f.Compute(weight)
+ out_row.append(weight)
+ baseline = None
+ for results in row[start_col:]:
+ column_start = 0
+ values = None
+ # If generating sample table, we will split a tuple of iterations info
+ # from the results
+ if isinstance(results, tuple):
+ it, values = results
+ column_start = 1
+ cell = Cell()
+ cell.string_value = "[%d: %d]" % (it[0], it[1])
+ out_row.append(cell)
+ if not row_index:
+ self._table_columns.append(self._columns[0])
+ else:
+ values = results
+ # Parse each column
+ for column in self._columns[column_start:]:
+ cell = Cell()
+ cell.name = key.string_value
+ if (
+ not column.result.NeedsBaseline()
+ or baseline is not None
+ ):
+ column.result.Compute(cell, values, baseline)
+ column.fmt.Compute(cell)
+ out_row.append(cell)
+ if not row_index:
+ self._table_columns.append(column)
+
+ if baseline is None:
+ baseline = values
+ self._out_table.append(out_row)
+ row_index += 1
+
+ # If this is a summary table, and the only row in it is 'retval', and
+ # all the test runs failed, we need to add a 'Results' row to the output
+ # table.
+ if table_type == "summary" and all_failed and len(self._table) == 2:
+ labels_row = self._table[0]
+ key = Cell()
+ key.string_value = "Results"
+ out_row = [key]
+ baseline = None
+ for _ in labels_row[1:]:
+ for column in self._columns:
+ cell = Cell()
+ cell.name = key.string_value
+ column.result.Compute(cell, ["Fail"], baseline)
+ column.fmt.Compute(cell)
+ out_row.append(cell)
+ if not row_index:
+ self._table_columns.append(column)
+ self._out_table.append(out_row)
+
+ def AddColumnName(self):
+ """Generate Column name at the top of table."""
+ key = Cell()
+ key.header = True
+ key.string_value = "Keys" if not self._samples_table else "Benchmarks"
+ header = [key]
+ if self._samples_table:
+ weight = Cell()
+ weight.header = True
+ weight.string_value = "Weights"
+ header.append(weight)
+ for column in self._table_columns:
+ cell = Cell()
+ cell.header = True
+ if column.name:
+ cell.string_value = column.name
+ else:
+ result_name = column.result.__class__.__name__
+ format_name = column.fmt.__class__.__name__
- Args:
- table: A list of lists of values.
- columns: A list of column containing what to produce and how to format
- it.
- samples_table: A flag to check whether we are generating a table of
- samples in CWP apporximation mode.
- """
- self._table = table
- self._columns = columns
- self._samples_table = samples_table
- self._table_columns = []
- self._out_table = []
-
- def GenerateCellTable(self, table_type):
- row_index = 0
- all_failed = False
-
- for row in self._table[1:]:
- # If we are generating samples_table, the second value will be weight
- # rather than values.
- start_col = 2 if self._samples_table else 1
- # It does not make sense to put retval in the summary table.
- if str(row[0]) == 'retval' and table_type == 'summary':
- # Check to see if any runs passed, and update all_failed.
- all_failed = True
- for values in row[start_col:]:
- if 0 in values:
- all_failed = False
- continue
- key = Cell()
- key.string_value = str(row[0])
- out_row = [key]
- if self._samples_table:
- # Add one column for weight if in samples_table mode
- weight = Cell()
- weight.value = row[1]
- f = WeightFormat()
- f.Compute(weight)
- out_row.append(weight)
- baseline = None
- for results in row[start_col:]:
- column_start = 0
- values = None
- # If generating sample table, we will split a tuple of iterations info
- # from the results
- if isinstance(results, tuple):
- it, values = results
- column_start = 1
- cell = Cell()
- cell.string_value = '[%d: %d]' % (it[0], it[1])
- out_row.append(cell)
- if not row_index:
- self._table_columns.append(self._columns[0])
- else:
- values = results
- # Parse each column
- for column in self._columns[column_start:]:
- cell = Cell()
- cell.name = key.string_value
- if not column.result.NeedsBaseline() or baseline is not None:
- column.result.Compute(cell, values, baseline)
- column.fmt.Compute(cell)
- out_row.append(cell)
- if not row_index:
- self._table_columns.append(column)
-
- if baseline is None:
- baseline = values
- self._out_table.append(out_row)
- row_index += 1
-
- # If this is a summary table, and the only row in it is 'retval', and
- # all the test runs failed, we need to a 'Results' row to the output
- # table.
- if table_type == 'summary' and all_failed and len(self._table) == 2:
- labels_row = self._table[0]
- key = Cell()
- key.string_value = 'Results'
- out_row = [key]
- baseline = None
- for _ in labels_row[1:]:
- for column in self._columns:
- cell = Cell()
- cell.name = key.string_value
- column.result.Compute(cell, ['Fail'], baseline)
- column.fmt.Compute(cell)
- out_row.append(cell)
- if not row_index:
- self._table_columns.append(column)
- self._out_table.append(out_row)
-
- def AddColumnName(self):
- """Generate Column name at the top of table."""
- key = Cell()
- key.header = True
- key.string_value = 'Keys' if not self._samples_table else 'Benchmarks'
- header = [key]
- if self._samples_table:
- weight = Cell()
- weight.header = True
- weight.string_value = 'Weights'
- header.append(weight)
- for column in self._table_columns:
- cell = Cell()
- cell.header = True
- if column.name:
- cell.string_value = column.name
- else:
- result_name = column.result.__class__.__name__
- format_name = column.fmt.__class__.__name__
-
- cell.string_value = '%s %s' % (
- result_name.replace('Result', ''),
- format_name.replace('Format', ''),
- )
+ cell.string_value = "%s %s" % (
+ result_name.replace("Result", ""),
+ format_name.replace("Format", ""),
+ )
- header.append(cell)
-
- self._out_table = [header] + self._out_table
-
- def AddHeader(self, s):
- """Put additional string on the top of the table."""
- cell = Cell()
- cell.header = True
- cell.string_value = str(s)
- header = [cell]
- colspan = max(1, max(len(row) for row in self._table))
- cell.colspan = colspan
- self._out_table = [header] + self._out_table
-
- def GetPassesAndFails(self, values):
- passes = 0
- fails = 0
- for val in values:
- if val == 0:
- passes = passes + 1
- else:
- fails = fails + 1
- return passes, fails
-
- def AddLabelName(self):
- """Put label on the top of the table."""
- top_header = []
- base_colspan = len(
- [c for c in self._columns if not c.result.NeedsBaseline()])
- compare_colspan = len(self._columns)
- # Find the row with the key 'retval', if it exists. This
- # will be used to calculate the number of iterations that passed and
- # failed for each image label.
- retval_row = None
- for row in self._table:
- if row[0] == 'retval':
- retval_row = row
- # The label is organized as follows
- # "keys" label_base, label_comparison1, label_comparison2
- # The first cell has colspan 1, the second is base_colspan
- # The others are compare_colspan
- column_position = 0
- for label in self._table[0]:
- cell = Cell()
- cell.header = True
- # Put the number of pass/fail iterations in the image label header.
- if column_position > 0 and retval_row:
- retval_values = retval_row[column_position]
- if isinstance(retval_values, list):
- passes, fails = self.GetPassesAndFails(retval_values)
- cell.string_value = str(label) + ' (pass:%d fail:%d)' % (passes,
- fails)
- else:
- cell.string_value = str(label)
- else:
- cell.string_value = str(label)
- if top_header:
- if not self._samples_table or (self._samples_table and
- len(top_header) == 2):
- cell.colspan = base_colspan
- if len(top_header) > 1:
- if not self._samples_table or (self._samples_table and
- len(top_header) > 2):
- cell.colspan = compare_colspan
- top_header.append(cell)
- column_position = column_position + 1
- self._out_table = [top_header] + self._out_table
-
- def _PrintOutTable(self):
- o = ''
- for row in self._out_table:
- for cell in row:
- o += str(cell) + ' '
- o += '\n'
- print(o)
-
- def GetCellTable(self, table_type='full', headers=True):
- """Function to return a table of cells.
-
- The table (list of lists) is converted into a table of cells by this
- function.
+ header.append(cell)
- Args:
- table_type: Can be 'full' or 'summary'
- headers: A boolean saying whether we want default headers
+ self._out_table = [header] + self._out_table
- Returns:
- A table of cells with each cell having the properties and string values as
- requiested by the columns passed in the constructor.
- """
- # Generate the cell table, creating a list of dynamic columns on the fly.
- if not self._out_table:
- self.GenerateCellTable(table_type)
- if headers:
- self.AddColumnName()
- self.AddLabelName()
- return self._out_table
+ def AddHeader(self, s):
+ """Put additional string on the top of the table."""
+ cell = Cell()
+ cell.header = True
+ cell.string_value = str(s)
+ header = [cell]
+ colspan = max(1, max(len(row) for row in self._table))
+ cell.colspan = colspan
+ self._out_table = [header] + self._out_table
+
+ def GetPassesAndFails(self, values):
+ passes = 0
+ fails = 0
+ for val in values:
+ if val == 0:
+ passes = passes + 1
+ else:
+ fails = fails + 1
+ return passes, fails
+
+ def AddLabelName(self):
+ """Put label on the top of the table."""
+ top_header = []
+ base_colspan = len(
+ [c for c in self._columns if not c.result.NeedsBaseline()]
+ )
+ compare_colspan = len(self._columns)
+ # Find the row with the key 'retval', if it exists. This
+ # will be used to calculate the number of iterations that passed and
+ # failed for each image label.
+ retval_row = None
+ for row in self._table:
+ if row[0] == "retval":
+ retval_row = row
+ # The label is organized as follows
+ # "keys" label_base, label_comparison1, label_comparison2
+ # The first cell has colspan 1, the second is base_colspan
+ # The others are compare_colspan
+ column_position = 0
+ for label in self._table[0]:
+ cell = Cell()
+ cell.header = True
+ # Put the number of pass/fail iterations in the image label header.
+ if column_position > 0 and retval_row:
+ retval_values = retval_row[column_position]
+ if isinstance(retval_values, list):
+ passes, fails = self.GetPassesAndFails(retval_values)
+ cell.string_value = str(label) + " (pass:%d fail:%d)" % (
+ passes,
+ fails,
+ )
+ else:
+ cell.string_value = str(label)
+ else:
+ cell.string_value = str(label)
+ if top_header:
+ if not self._samples_table or (
+ self._samples_table and len(top_header) == 2
+ ):
+ cell.colspan = base_colspan
+ if len(top_header) > 1:
+ if not self._samples_table or (
+ self._samples_table and len(top_header) > 2
+ ):
+ cell.colspan = compare_colspan
+ top_header.append(cell)
+ column_position = column_position + 1
+ self._out_table = [top_header] + self._out_table
+
+ def _PrintOutTable(self):
+ o = ""
+ for row in self._out_table:
+ for cell in row:
+ o += str(cell) + " "
+ o += "\n"
+ print(o)
+
+ def GetCellTable(self, table_type="full", headers=True):
+ """Function to return a table of cells.
+
+ The table (list of lists) is converted into a table of cells by this
+ function.
+
+ Args:
+ table_type: Can be 'full' or 'summary'
+ headers: A boolean saying whether we want default headers
+
+ Returns:
+ A table of cells with each cell having the properties and string values as
+ requested by the columns passed in the constructor.
+ """
+ # Generate the cell table, creating a list of dynamic columns on the fly.
+ if not self._out_table:
+ self.GenerateCellTable(table_type)
+ if headers:
+ self.AddColumnName()
+ self.AddLabelName()
+ return self._out_table
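Putting the pieces together, a minimal end-to-end sketch (hypothetical run data and an arbitrary choice of columns; it assumes the classes defined in this file are in scope, and TablePrinter is defined just below):

    runs = [[{"retval": 0, "score": 10.0}], [{"retval": 0, "score": 12.0}]]
    labels = ["vanilla", "modified"]
    table = TableGenerator(runs, labels).GetTable()
    columns = [
        Column(AmeanResult(), Format()),
        Column(AmeanRatioResult(), PercentFormat()),
    ]
    cell_table = TableFormatter(table, columns).GetCellTable()
    print(TablePrinter(cell_table, TablePrinter.CONSOLE).Print())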
class TablePrinter(object):
- """Class to print a cell table to the console, file or html."""
- PLAIN = 0
- CONSOLE = 1
- HTML = 2
- TSV = 3
- EMAIL = 4
-
- def __init__(self, table, output_type):
- """Constructor that stores the cell table and output type."""
- self._table = table
- self._output_type = output_type
- self._row_styles = []
- self._column_styles = []
-
- # Compute whole-table properties like max-size, etc.
- def _ComputeStyle(self):
- self._row_styles = []
- for row in self._table:
- row_style = Cell()
- for cell in row:
- if cell.color_row:
- assert cell.color, 'Cell color not set but color_row set!'
- assert not row_style.color, 'Multiple row_style.colors found!'
- row_style.color = cell.color
- if cell.bgcolor_row:
- assert cell.bgcolor, 'Cell bgcolor not set but bgcolor_row set!'
- assert not row_style.bgcolor, 'Multiple row_style.bgcolors found!'
- row_style.bgcolor = cell.bgcolor
- self._row_styles.append(row_style)
-
- self._column_styles = []
- if len(self._table) < 2:
- return
-
- for i in range(max(len(row) for row in self._table)):
- column_style = Cell()
- for row in self._table:
- if not any([cell.colspan != 1 for cell in row]):
- column_style.width = max(column_style.width, len(row[i].string_value))
- self._column_styles.append(column_style)
-
- def _GetBGColorFix(self, color):
- if self._output_type == self.CONSOLE:
- prefix = misc.rgb2short(color.r, color.g, color.b)
- # pylint: disable=anomalous-backslash-in-string
- prefix = '\033[48;5;%sm' % prefix
- suffix = '\033[0m'
- elif self._output_type in [self.EMAIL, self.HTML]:
- rgb = color.GetRGB()
- prefix = ('<FONT style="BACKGROUND-COLOR:#{0}">'.format(rgb))
- suffix = '</FONT>'
- elif self._output_type in [self.PLAIN, self.TSV]:
- prefix = ''
- suffix = ''
- return prefix, suffix
-
- def _GetColorFix(self, color):
- if self._output_type == self.CONSOLE:
- prefix = misc.rgb2short(color.r, color.g, color.b)
- # pylint: disable=anomalous-backslash-in-string
- prefix = '\033[38;5;%sm' % prefix
- suffix = '\033[0m'
- elif self._output_type in [self.EMAIL, self.HTML]:
- rgb = color.GetRGB()
- prefix = '<FONT COLOR=#{0}>'.format(rgb)
- suffix = '</FONT>'
- elif self._output_type in [self.PLAIN, self.TSV]:
- prefix = ''
- suffix = ''
- return prefix, suffix
-
- def Print(self):
- """Print the table to a console, html, etc.
-
- Returns:
- A string that contains the desired representation of the table.
- """
- self._ComputeStyle()
- return self._GetStringValue()
-
- def _GetCellValue(self, i, j):
- cell = self._table[i][j]
- out = cell.string_value
- raw_width = len(out)
-
- if cell.color:
- p, s = self._GetColorFix(cell.color)
- out = '%s%s%s' % (p, out, s)
-
- if cell.bgcolor:
- p, s = self._GetBGColorFix(cell.bgcolor)
- out = '%s%s%s' % (p, out, s)
-
- if self._output_type in [self.PLAIN, self.CONSOLE, self.EMAIL]:
- if cell.width:
- width = cell.width
- else:
- if self._column_styles:
- width = self._column_styles[j].width
- else:
- width = len(cell.string_value)
- if cell.colspan > 1:
- width = 0
- start = 0
- for k in range(j):
- start += self._table[i][k].colspan
- for k in range(cell.colspan):
- width += self._column_styles[start + k].width
- if width > raw_width:
- padding = ('%' + str(width - raw_width) + 's') % ''
- out = padding + out
-
- if self._output_type == self.HTML:
- if cell.header:
- tag = 'th'
- else:
- tag = 'td'
- out = '<{0} colspan = "{2}"> {1} </{0}>'.format(tag, out, cell.colspan)
-
- return out
-
- def _GetHorizontalSeparator(self):
- if self._output_type in [self.CONSOLE, self.PLAIN, self.EMAIL]:
- return ' '
- if self._output_type == self.HTML:
- return ''
- if self._output_type == self.TSV:
- return '\t'
-
- def _GetVerticalSeparator(self):
- if self._output_type in [self.PLAIN, self.CONSOLE, self.TSV, self.EMAIL]:
- return '\n'
- if self._output_type == self.HTML:
- return '</tr>\n<tr>'
-
- def _GetPrefix(self):
- if self._output_type in [self.PLAIN, self.CONSOLE, self.TSV, self.EMAIL]:
- return ''
- if self._output_type == self.HTML:
- return '<p></p><table id="box-table-a">\n<tr>'
-
- def _GetSuffix(self):
- if self._output_type in [self.PLAIN, self.CONSOLE, self.TSV, self.EMAIL]:
- return ''
- if self._output_type == self.HTML:
- return '</tr>\n</table>'
-
- def _GetStringValue(self):
- o = ''
- o += self._GetPrefix()
- for i in range(len(self._table)):
- row = self._table[i]
- # Apply row color and bgcolor.
- p = s = bgp = bgs = ''
- if self._row_styles[i].bgcolor:
- bgp, bgs = self._GetBGColorFix(self._row_styles[i].bgcolor)
- if self._row_styles[i].color:
- p, s = self._GetColorFix(self._row_styles[i].color)
- o += p + bgp
- for j in range(len(row)):
- out = self._GetCellValue(i, j)
- o += out + self._GetHorizontalSeparator()
- o += s + bgs
- o += self._GetVerticalSeparator()
- o += self._GetSuffix()
- return o
+ """Class to print a cell table to the console, a file, or HTML."""
+
+ PLAIN = 0
+ CONSOLE = 1
+ HTML = 2
+ TSV = 3
+ EMAIL = 4
+
+ def __init__(self, table, output_type):
+ """Constructor that stores the cell table and output type."""
+ self._table = table
+ self._output_type = output_type
+ self._row_styles = []
+ self._column_styles = []
+
+ # Compute whole-table properties like max-size, etc.
+ def _ComputeStyle(self):
+ self._row_styles = []
+ for row in self._table:
+ row_style = Cell()
+ for cell in row:
+ if cell.color_row:
+ assert cell.color, "Cell color not set but color_row set!"
+ assert (
+ not row_style.color
+ ), "Multiple row_style.colors found!"
+ row_style.color = cell.color
+ if cell.bgcolor_row:
+ assert (
+ cell.bgcolor
+ ), "Cell bgcolor not set but bgcolor_row set!"
+ assert (
+ not row_style.bgcolor
+ ), "Multiple row_style.bgcolors found!"
+ row_style.bgcolor = cell.bgcolor
+ self._row_styles.append(row_style)
+
+ self._column_styles = []
+ if len(self._table) < 2:
+ return
+
+ for i in range(max(len(row) for row in self._table)):
+ column_style = Cell()
+ for row in self._table:
+ if not any([cell.colspan != 1 for cell in row]):
+ column_style.width = max(
+ column_style.width, len(row[i].string_value)
+ )
+ self._column_styles.append(column_style)
+
+ def _GetBGColorFix(self, color):
+ if self._output_type == self.CONSOLE:
+ prefix = misc.rgb2short(color.r, color.g, color.b)
+ # pylint: disable=anomalous-backslash-in-string
+ prefix = "\033[48;5;%sm" % prefix
+ suffix = "\033[0m"
+ elif self._output_type in [self.EMAIL, self.HTML]:
+ rgb = color.GetRGB()
+ prefix = '<FONT style="BACKGROUND-COLOR:#{0}">'.format(rgb)
+ suffix = "</FONT>"
+ elif self._output_type in [self.PLAIN, self.TSV]:
+ prefix = ""
+ suffix = ""
+ return prefix, suffix
+
+ def _GetColorFix(self, color):
+ if self._output_type == self.CONSOLE:
+ prefix = misc.rgb2short(color.r, color.g, color.b)
+ # pylint: disable=anomalous-backslash-in-string
+ prefix = "\033[38;5;%sm" % prefix
+ suffix = "\033[0m"
+ elif self._output_type in [self.EMAIL, self.HTML]:
+ rgb = color.GetRGB()
+ prefix = "<FONT COLOR=#{0}>".format(rgb)
+ suffix = "</FONT>"
+ elif self._output_type in [self.PLAIN, self.TSV]:
+ prefix = ""
+ suffix = ""
+ return prefix, suffix
+
+ def Print(self):
+ """Print the table to a console, html, etc.
+
+ Returns:
+ A string that contains the desired representation of the table.
+ """
+ self._ComputeStyle()
+ return self._GetStringValue()
+
+ def _GetCellValue(self, i, j):
+ cell = self._table[i][j]
+ out = cell.string_value
+ raw_width = len(out)
+
+ if cell.color:
+ p, s = self._GetColorFix(cell.color)
+ out = "%s%s%s" % (p, out, s)
+
+ if cell.bgcolor:
+ p, s = self._GetBGColorFix(cell.bgcolor)
+ out = "%s%s%s" % (p, out, s)
+
+ if self._output_type in [self.PLAIN, self.CONSOLE, self.EMAIL]:
+ if cell.width:
+ width = cell.width
+ else:
+ if self._column_styles:
+ width = self._column_styles[j].width
+ else:
+ width = len(cell.string_value)
+ if cell.colspan > 1:
+ width = 0
+ start = 0
+ for k in range(j):
+ start += self._table[i][k].colspan
+ for k in range(cell.colspan):
+ width += self._column_styles[start + k].width
+ if width > raw_width:
+ padding = ("%" + str(width - raw_width) + "s") % ""
+ out = padding + out
+
+ if self._output_type == self.HTML:
+ if cell.header:
+ tag = "th"
+ else:
+ tag = "td"
+ out = '<{0} colspan = "{2}"> {1} </{0}>'.format(
+ tag, out, cell.colspan
+ )
+
+ return out
+
+ def _GetHorizontalSeparator(self):
+ if self._output_type in [self.CONSOLE, self.PLAIN, self.EMAIL]:
+ return " "
+ if self._output_type == self.HTML:
+ return ""
+ if self._output_type == self.TSV:
+ return "\t"
+
+ def _GetVerticalSeparator(self):
+ if self._output_type in [
+ self.PLAIN,
+ self.CONSOLE,
+ self.TSV,
+ self.EMAIL,
+ ]:
+ return "\n"
+ if self._output_type == self.HTML:
+ return "</tr>\n<tr>"
+
+ def _GetPrefix(self):
+ if self._output_type in [
+ self.PLAIN,
+ self.CONSOLE,
+ self.TSV,
+ self.EMAIL,
+ ]:
+ return ""
+ if self._output_type == self.HTML:
+ return '<p></p><table id="box-table-a">\n<tr>'
+
+ def _GetSuffix(self):
+ if self._output_type in [
+ self.PLAIN,
+ self.CONSOLE,
+ self.TSV,
+ self.EMAIL,
+ ]:
+ return ""
+ if self._output_type == self.HTML:
+ return "</tr>\n</table>"
+
+ def _GetStringValue(self):
+ o = ""
+ o += self._GetPrefix()
+ for i in range(len(self._table)):
+ row = self._table[i]
+ # Apply row color and bgcolor.
+ p = s = bgp = bgs = ""
+ if self._row_styles[i].bgcolor:
+ bgp, bgs = self._GetBGColorFix(self._row_styles[i].bgcolor)
+ if self._row_styles[i].color:
+ p, s = self._GetColorFix(self._row_styles[i].color)
+ o += p + bgp
+ for j in range(len(row)):
+ out = self._GetCellValue(i, j)
+ o += out + self._GetHorizontalSeparator()
+ o += s + bgs
+ o += self._GetVerticalSeparator()
+ o += self._GetSuffix()
+ return o
# Some common drivers
def GetSimpleTable(table, out_to=TablePrinter.CONSOLE):
- """Prints a simple table.
-
- This is used by code that has a very simple list-of-lists and wants to
- produce a table with ameans, a percentage ratio of ameans and a colorbox.
-
- Examples:
- GetSimpleConsoleTable([["binary", "b1", "b2"],["size", "300", "400"]])
- will produce a colored table that can be printed to the console.
-
- Args:
- table: a list of lists.
- out_to: specify the fomat of output. Currently it supports HTML and CONSOLE.
-
- Returns:
- A string version of the table that can be printed to the console.
- """
- columns = [
- Column(AmeanResult(), Format()),
- Column(AmeanRatioResult(), PercentFormat()),
- Column(AmeanRatioResult(), ColorBoxFormat()),
- ]
- our_table = [table[0]]
- for row in table[1:]:
- our_row = [row[0]]
- for v in row[1:]:
- our_row.append([v])
- our_table.append(our_row)
-
- tf = TableFormatter(our_table, columns)
- cell_table = tf.GetCellTable()
- tp = TablePrinter(cell_table, out_to)
- return tp.Print()
+ """Prints a simple table.
+
+ This is used by code that has a very simple list-of-lists and wants to
+ produce a table with ameans, a percentage ratio of ameans and a colorbox.
+
+ Examples:
+ GetSimpleTable([["binary", "b1", "b2"], ["size", "300", "400"]])
+ will produce a colored table that can be printed to the console.
+
+ Args:
+ table: a list of lists.
+ out_to: specifies the format of the output. Currently it supports HTML and CONSOLE.
+
+ Returns:
+ A string version of the table that can be printed to the console.
+ """
+ columns = [
+ Column(AmeanResult(), Format()),
+ Column(AmeanRatioResult(), PercentFormat()),
+ Column(AmeanRatioResult(), ColorBoxFormat()),
+ ]
+ our_table = [table[0]]
+ for row in table[1:]:
+ our_row = [row[0]]
+ for v in row[1:]:
+ our_row.append([v])
+ our_table.append(our_row)
+
+ tf = TableFormatter(our_table, columns)
+ cell_table = tf.GetCellTable()
+ tp = TablePrinter(cell_table, out_to)
+ return tp.Print()
# pylint: disable=redefined-outer-name
def GetComplexTable(runs, labels, out_to=TablePrinter.CONSOLE):
- """Prints a complex table.
+ """Prints a complex table.
- This can be used to generate a table with arithmetic mean, standard deviation,
- coefficient of variation, p-values, etc.
+ This can be used to generate a table with arithmetic mean, standard deviation,
+ coefficient of variation, p-values, etc.
- Args:
- runs: A list of lists with data to tabulate.
- labels: A list of labels that correspond to the runs.
- out_to: specifies the format of the table (example CONSOLE or HTML).
+ Args:
+ runs: A list of lists with data to tabulate.
+ labels: A list of labels that correspond to the runs.
+ out_to: specifies the format of the table (example CONSOLE or HTML).
- Returns:
- A string table that can be printed to the console or put in an HTML file.
- """
- tg = TableGenerator(runs, labels, TableGenerator.SORT_BY_VALUES_DESC)
- table = tg.GetTable()
- columns = [
- Column(LiteralResult(), Format(), 'Literal'),
- Column(AmeanResult(), Format()),
- Column(StdResult(), Format()),
- Column(CoeffVarResult(), CoeffVarFormat()),
- Column(NonEmptyCountResult(), Format()),
- Column(AmeanRatioResult(), PercentFormat()),
- Column(AmeanRatioResult(), RatioFormat()),
- Column(GmeanRatioResult(), RatioFormat()),
- Column(PValueResult(), PValueFormat())
- ]
- tf = TableFormatter(table, columns)
- cell_table = tf.GetCellTable()
- tp = TablePrinter(cell_table, out_to)
- return tp.Print()
-
-
-if __name__ == '__main__':
- # Run a few small tests here.
- run1 = {
- 'k1': '10',
- 'k2': '12',
- 'k5': '40',
- 'k6': '40',
- 'ms_1': '20',
- 'k7': 'FAIL',
- 'k8': 'PASS',
- 'k9': 'PASS',
- 'k10': '0'
- }
- run2 = {
- 'k1': '13',
- 'k2': '14',
- 'k3': '15',
- 'ms_1': '10',
- 'k8': 'PASS',
- 'k9': 'FAIL',
- 'k10': '0'
- }
- run3 = {
- 'k1': '50',
- 'k2': '51',
- 'k3': '52',
- 'k4': '53',
- 'k5': '35',
- 'k6': '45',
- 'ms_1': '200',
- 'ms_2': '20',
- 'k7': 'FAIL',
- 'k8': 'PASS',
- 'k9': 'PASS'
- }
- runs = [[run1, run2], [run3]]
- labels = ['vanilla', 'modified']
- t = GetComplexTable(runs, labels, TablePrinter.CONSOLE)
- print(t)
- email = GetComplexTable(runs, labels, TablePrinter.EMAIL)
-
- runs = [[{
- 'k1': '1'
- }, {
- 'k1': '1.1'
- }, {
- 'k1': '1.2'
- }], [{
- 'k1': '5'
- }, {
- 'k1': '5.1'
- }, {
- 'k1': '5.2'
- }]]
- t = GetComplexTable(runs, labels, TablePrinter.CONSOLE)
- print(t)
-
- simple_table = [
- ['binary', 'b1', 'b2', 'b3'],
- ['size', 100, 105, 108],
- ['rodata', 100, 80, 70],
- ['data', 100, 100, 100],
- ['debug', 100, 140, 60],
- ]
- t = GetSimpleTable(simple_table)
- print(t)
- email += GetSimpleTable(simple_table, TablePrinter.HTML)
- email_to = [getpass.getuser()]
- email = "<pre style='font-size: 13px'>%s</pre>" % email
- EmailSender().SendEmail(email_to, 'SimpleTableTest', email, msg_type='html')
+ Returns:
+ A string table that can be printed to the console or put in an HTML file.
+ """
+ tg = TableGenerator(runs, labels, TableGenerator.SORT_BY_VALUES_DESC)
+ table = tg.GetTable()
+ columns = [
+ Column(LiteralResult(), Format(), "Literal"),
+ Column(AmeanResult(), Format()),
+ Column(StdResult(), Format()),
+ Column(CoeffVarResult(), CoeffVarFormat()),
+ Column(NonEmptyCountResult(), Format()),
+ Column(AmeanRatioResult(), PercentFormat()),
+ Column(AmeanRatioResult(), RatioFormat()),
+ Column(GmeanRatioResult(), RatioFormat()),
+ Column(PValueResult(), PValueFormat()),
+ ]
+ tf = TableFormatter(table, columns)
+ cell_table = tf.GetCellTable()
+ tp = TablePrinter(cell_table, out_to)
+ return tp.Print()
+
+
+if __name__ == "__main__":
+ # Run a few small tests here.
+ run1 = {
+ "k1": "10",
+ "k2": "12",
+ "k5": "40",
+ "k6": "40",
+ "ms_1": "20",
+ "k7": "FAIL",
+ "k8": "PASS",
+ "k9": "PASS",
+ "k10": "0",
+ }
+ run2 = {
+ "k1": "13",
+ "k2": "14",
+ "k3": "15",
+ "ms_1": "10",
+ "k8": "PASS",
+ "k9": "FAIL",
+ "k10": "0",
+ }
+ run3 = {
+ "k1": "50",
+ "k2": "51",
+ "k3": "52",
+ "k4": "53",
+ "k5": "35",
+ "k6": "45",
+ "ms_1": "200",
+ "ms_2": "20",
+ "k7": "FAIL",
+ "k8": "PASS",
+ "k9": "PASS",
+ }
+ runs = [[run1, run2], [run3]]
+ labels = ["vanilla", "modified"]
+ t = GetComplexTable(runs, labels, TablePrinter.CONSOLE)
+ print(t)
+ email = GetComplexTable(runs, labels, TablePrinter.EMAIL)
+
+ runs = [
+ [{"k1": "1"}, {"k1": "1.1"}, {"k1": "1.2"}],
+ [{"k1": "5"}, {"k1": "5.1"}, {"k1": "5.2"}],
+ ]
+ t = GetComplexTable(runs, labels, TablePrinter.CONSOLE)
+ print(t)
+
+ simple_table = [
+ ["binary", "b1", "b2", "b3"],
+ ["size", 100, 105, 108],
+ ["rodata", 100, 80, 70],
+ ["data", 100, 100, 100],
+ ["debug", 100, 140, 60],
+ ]
+ t = GetSimpleTable(simple_table)
+ print(t)
+ email += GetSimpleTable(simple_table, TablePrinter.HTML)
+ email_to = [getpass.getuser()]
+ email = "<pre style='font-size: 13px'>%s</pre>" % email
+ EmailSender().SendEmail(email_to, "SimpleTableTest", email, msg_type="html")
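A minimal usage sketch of the drivers above, based on the docstrings and the __main__ block in this file; the import path cros_utils.tabulator is an assumption about how the module is loaded:

    # Render the same list-of-lists for the console and as HTML.
    from cros_utils.tabulator import GetSimpleTable, TablePrinter

    simple_table = [
        ["binary", "b1", "b2", "b3"],
        ["size", 100, 105, 108],
    ]
    console_out = GetSimpleTable(simple_table)  # defaults to TablePrinter.CONSOLE
    html_out = GetSimpleTable(simple_table, TablePrinter.HTML)
    print(console_out)

The HTML variant is what the __main__ block above wraps in a <pre> tag and mails via EmailSender.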
diff --git a/cros_utils/tabulator_test.py b/cros_utils/tabulator_test.py
index 9dd4828e..91ce8fd5 100755
--- a/cros_utils/tabulator_test.py
+++ b/cros_utils/tabulator_test.py
@@ -1,12 +1,11 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
-# Copyright (c) 2012 The Chromium OS Authors. All rights reserved.
+# Copyright 2012 The ChromiumOS Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Tests for the tabulator module."""
-from __future__ import print_function
__author__ = 'asharif@google.com (Ahmad Sharif)'
diff --git a/cros_utils/timeline.py b/cros_utils/timeline.py
index cce0b05c..f18a39bb 100644
--- a/cros_utils/timeline.py
+++ b/cros_utils/timeline.py
@@ -1,55 +1,55 @@
# -*- coding: utf-8 -*-
-# Copyright 2019 The Chromium OS Authors. All rights reserved.
+# Copyright 2019 The ChromiumOS Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Tools for recording and reporting timeline of benchmark_run."""
-from __future__ import print_function
-__author__ = 'yunlian@google.com (Yunlian Jiang)'
+__author__ = "yunlian@google.com (Yunlian Jiang)"
import time
class Event(object):
- """One event on the timeline."""
+ """One event on the timeline."""
- def __init__(self, name='', cur_time=0):
- self.name = name
- self.timestamp = cur_time
+ def __init__(self, name="", cur_time=0):
+ self.name = name
+ self.timestamp = cur_time
class Timeline(object):
- """Use a dict to store the timeline."""
-
- def __init__(self):
- self.events = []
-
- def Record(self, event):
- for e in self.events:
- assert e.name != event, (
- 'The event {0} is already recorded.'.format(event))
- cur_event = Event(name=event, cur_time=time.time())
- self.events.append(cur_event)
-
- def GetEvents(self):
- return ([e.name for e in self.events])
-
- def GetEventDict(self):
- tl = {}
- for e in self.events:
- tl[e.name] = e.timestamp
- return tl
-
- def GetEventTime(self, event):
- for e in self.events:
- if e.name == event:
- return e.timestamp
- raise IndexError('The event {0} is not recorded'.format(event))
-
- def GetLastEventTime(self):
- return self.events[-1].timestamp
-
- def GetLastEvent(self):
- return self.events[-1].name
+ """Store the timeline as a list of events."""
+
+ def __init__(self):
+ self.events = []
+
+ def Record(self, event):
+ for e in self.events:
+ assert e.name != event, "The event {0} is already recorded.".format(
+ event
+ )
+ cur_event = Event(name=event, cur_time=time.time())
+ self.events.append(cur_event)
+
+ def GetEvents(self):
+ return [e.name for e in self.events]
+
+ def GetEventDict(self):
+ tl = {}
+ for e in self.events:
+ tl[e.name] = e.timestamp
+ return tl
+
+ def GetEventTime(self, event):
+ for e in self.events:
+ if e.name == event:
+ return e.timestamp
+ raise IndexError("The event {0} is not recorded".format(event))
+
+ def GetLastEventTime(self):
+ return self.events[-1].timestamp
+
+ def GetLastEvent(self):
+ return self.events[-1].name
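A short sketch of how the Timeline class above is exercised, mirroring the unit tests in timeline_test.py below (the `from cros_utils import timeline` import matches the test file):

    from cros_utils import timeline

    tl = timeline.Timeline()
    tl.Record("setup")            # each Record() stores an Event stamped with time.time()
    tl.Record("benchmark_run")    # recording the same name twice trips the assert
    tl.GetEvents()                # -> ["setup", "benchmark_run"]
    tl.GetEventTime("setup")      # timestamp of a named event; IndexError if never recorded
    tl.GetLastEvent()             # -> "benchmark_run"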
diff --git a/cros_utils/timeline_test.py b/cros_utils/timeline_test.py
index 8a10e549..aceab2df 100755
--- a/cros_utils/timeline_test.py
+++ b/cros_utils/timeline_test.py
@@ -1,14 +1,13 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
-# Copyright 2019 The Chromium OS Authors. All rights reserved.
+# Copyright 2019 The ChromiumOS Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Tests for time_line.py."""
-from __future__ import print_function
-__author__ = 'yunlian@google.com (Yunlian Jiang)'
+__author__ = "yunlian@google.com (Yunlian Jiang)"
import time
import unittest
@@ -17,46 +16,46 @@ from cros_utils import timeline
class TimeLineTest(unittest.TestCase):
- """Tests for the Timeline class."""
-
- def testRecord(self):
- tl = timeline.Timeline()
- tl.Record('A')
- t = time.time()
- t1 = tl.events[0].timestamp
- self.assertEqual(int(t1 - t), 0)
- self.assertRaises(AssertionError, tl.Record, 'A')
-
- def testGetEvents(self):
- tl = timeline.Timeline()
- tl.Record('A')
- e = tl.GetEvents()
- self.assertEqual(e, ['A'])
- tl.Record('B')
- e = tl.GetEvents()
- self.assertEqual(e, ['A', 'B'])
-
- def testGetEventTime(self):
- tl = timeline.Timeline()
- tl.Record('A')
- t = time.time()
- t1 = tl.GetEventTime('A')
- self.assertEqual(int(t1 - t), 0)
- self.assertRaises(IndexError, tl.GetEventTime, 'B')
-
- def testGetLastEventTime(self):
- tl = timeline.Timeline()
- self.assertRaises(IndexError, tl.GetLastEventTime)
- tl.Record('A')
- t = time.time()
- t1 = tl.GetLastEventTime()
- self.assertEqual(int(t1 - t), 0)
- time.sleep(2)
- tl.Record('B')
- t = time.time()
- t1 = tl.GetLastEventTime()
- self.assertEqual(int(t1 - t), 0)
-
-
-if __name__ == '__main__':
- unittest.main()
+ """Tests for the Timeline class."""
+
+ def testRecord(self):
+ tl = timeline.Timeline()
+ tl.Record("A")
+ t = time.time()
+ t1 = tl.events[0].timestamp
+ self.assertEqual(int(t1 - t), 0)
+ self.assertRaises(AssertionError, tl.Record, "A")
+
+ def testGetEvents(self):
+ tl = timeline.Timeline()
+ tl.Record("A")
+ e = tl.GetEvents()
+ self.assertEqual(e, ["A"])
+ tl.Record("B")
+ e = tl.GetEvents()
+ self.assertEqual(e, ["A", "B"])
+
+ def testGetEventTime(self):
+ tl = timeline.Timeline()
+ tl.Record("A")
+ t = time.time()
+ t1 = tl.GetEventTime("A")
+ self.assertEqual(int(t1 - t), 0)
+ self.assertRaises(IndexError, tl.GetEventTime, "B")
+
+ def testGetLastEventTime(self):
+ tl = timeline.Timeline()
+ self.assertRaises(IndexError, tl.GetLastEventTime)
+ tl.Record("A")
+ t = time.time()
+ t1 = tl.GetLastEventTime()
+ self.assertEqual(int(t1 - t), 0)
+ time.sleep(2)
+ tl.Record("B")
+ t = time.time()
+ t1 = tl.GetLastEventTime()
+ self.assertEqual(int(t1 - t), 0)
+
+
+if __name__ == "__main__":
+ unittest.main()
diff --git a/cros_utils/tiny_render.py b/cros_utils/tiny_render.py
index 629e7719..6168a247 100644
--- a/cros_utils/tiny_render.py
+++ b/cros_utils/tiny_render.py
@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
-# Copyright 2020 The Chromium OS Authors. All rights reserved.
+# Copyright 2020 The ChromiumOS Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
@@ -51,18 +51,18 @@ Turns into
The rendering functions should never mutate your input.
"""
-from __future__ import print_function
import collections
import html
import typing as t
-Bold = collections.namedtuple('Bold', ['inner'])
-LineBreak = collections.namedtuple('LineBreak', [])
-Link = collections.namedtuple('Link', ['href', 'inner'])
-UnorderedList = collections.namedtuple('UnorderedList', ['items'])
+
+Bold = collections.namedtuple("Bold", ["inner"])
+LineBreak = collections.namedtuple("LineBreak", [])
+Link = collections.namedtuple("Link", ["href", "inner"])
+UnorderedList = collections.namedtuple("UnorderedList", ["items"])
# Outputs different data depending on whether we're emitting text or HTML.
-Switch = collections.namedtuple('Switch', ['text', 'html'])
+Switch = collections.namedtuple("Switch", ["text", "html"])
line_break = LineBreak()
@@ -85,97 +85,98 @@ line_break = LineBreak()
Piece = t.Any # pylint: disable=invalid-name
-def _render_text_pieces(piece: Piece, indent_level: int,
- into: t.List[str]) -> None:
- """Helper for |render_text_pieces|. Accumulates strs into |into|."""
- if isinstance(piece, LineBreak):
- into.append('\n' + indent_level * ' ')
- return
+def _render_text_pieces(
+ piece: Piece, indent_level: int, into: t.List[str]
+) -> None:
+ """Helper for |render_text_pieces|. Accumulates strs into |into|."""
+ if isinstance(piece, LineBreak):
+ into.append("\n" + indent_level * " ")
+ return
- if isinstance(piece, str):
- into.append(piece)
- return
+ if isinstance(piece, str):
+ into.append(piece)
+ return
- if isinstance(piece, Bold):
- into.append('**')
- _render_text_pieces(piece.inner, indent_level, into)
- into.append('**')
- return
+ if isinstance(piece, Bold):
+ into.append("**")
+ _render_text_pieces(piece.inner, indent_level, into)
+ into.append("**")
+ return
- if isinstance(piece, Link):
- # Don't even try; it's ugly more often than not.
- _render_text_pieces(piece.inner, indent_level, into)
- return
+ if isinstance(piece, Link):
+ # Don't even try; it's ugly more often than not.
+ _render_text_pieces(piece.inner, indent_level, into)
+ return
- if isinstance(piece, UnorderedList):
- for p in piece.items:
- _render_text_pieces([line_break, '- ', p], indent_level + 2, into)
- return
+ if isinstance(piece, UnorderedList):
+ for p in piece.items:
+ _render_text_pieces([line_break, "- ", p], indent_level + 2, into)
+ return
- if isinstance(piece, Switch):
- _render_text_pieces(piece.text, indent_level, into)
- return
+ if isinstance(piece, Switch):
+ _render_text_pieces(piece.text, indent_level, into)
+ return
- if isinstance(piece, (list, tuple)):
- for p in piece:
- _render_text_pieces(p, indent_level, into)
- return
+ if isinstance(piece, (list, tuple)):
+ for p in piece:
+ _render_text_pieces(p, indent_level, into)
+ return
- raise ValueError('Unknown piece type: %s' % type(piece))
+ raise ValueError("Unknown piece type: %s" % type(piece))
def render_text_pieces(piece: Piece) -> str:
- """Renders the given Pieces into text."""
- into = []
- _render_text_pieces(piece, 0, into)
- return ''.join(into)
+ """Renders the given Pieces into text."""
+ into = []
+ _render_text_pieces(piece, 0, into)
+ return "".join(into)
def _render_html_pieces(piece: Piece, into: t.List[str]) -> None:
- """Helper for |render_html_pieces|. Accumulates strs into |into|."""
- if piece is line_break:
- into.append('<br />\n')
- return
-
- if isinstance(piece, str):
- into.append(html.escape(piece))
- return
-
- if isinstance(piece, Bold):
- into.append('<b>')
- _render_html_pieces(piece.inner, into)
- into.append('</b>')
- return
-
- if isinstance(piece, Link):
- into.append('<a href="' + piece.href + '">')
- _render_html_pieces(piece.inner, into)
- into.append('</a>')
- return
-
- if isinstance(piece, UnorderedList):
- into.append('<ul>\n')
- for p in piece.items:
- into.append('<li>')
- _render_html_pieces(p, into)
- into.append('</li>\n')
- into.append('</ul>\n')
- return
-
- if isinstance(piece, Switch):
- _render_html_pieces(piece.html, into)
- return
-
- if isinstance(piece, (list, tuple)):
- for p in piece:
- _render_html_pieces(p, into)
- return
-
- raise ValueError('Unknown piece type: %s' % type(piece))
+ """Helper for |render_html_pieces|. Accumulates strs into |into|."""
+ if piece is line_break:
+ into.append("<br />\n")
+ return
+
+ if isinstance(piece, str):
+ into.append(html.escape(piece))
+ return
+
+ if isinstance(piece, Bold):
+ into.append("<b>")
+ _render_html_pieces(piece.inner, into)
+ into.append("</b>")
+ return
+
+ if isinstance(piece, Link):
+ into.append('<a href="' + piece.href + '">')
+ _render_html_pieces(piece.inner, into)
+ into.append("</a>")
+ return
+
+ if isinstance(piece, UnorderedList):
+ into.append("<ul>\n")
+ for p in piece.items:
+ into.append("<li>")
+ _render_html_pieces(p, into)
+ into.append("</li>\n")
+ into.append("</ul>\n")
+ return
+
+ if isinstance(piece, Switch):
+ _render_html_pieces(piece.html, into)
+ return
+
+ if isinstance(piece, (list, tuple)):
+ for p in piece:
+ _render_html_pieces(p, into)
+ return
+
+ raise ValueError("Unknown piece type: %s" % type(piece))
def render_html_pieces(piece: Piece) -> str:
- """Renders the given Pieces into HTML."""
- into = []
- _render_html_pieces(piece, into)
- return ''.join(into)
+ """Renders the given Pieces into HTML."""
+ into = []
+ _render_html_pieces(piece, into)
+ return "".join(into)
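A sketch of how the tiny_render pieces compose, using only names defined in the module above; the bare `import tiny_render` mirrors the test file that follows:

    import tiny_render

    pieces = [
        "hello ",
        tiny_render.Bold("world"),
        tiny_render.line_break,
        tiny_render.UnorderedList(
            ["plain item", tiny_render.Link(href="https://google.com", inner="a link")]
        ),
    ]
    text_out = tiny_render.render_text_pieces(pieces)  # markdown-ish plain text
    html_out = tiny_render.render_html_pieces(pieces)  # the same tree as escaped HTML

Switch(text=..., html=...) can be mixed into the same tree wherever the two outputs need to diverge.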
diff --git a/cros_utils/tiny_render_test.py b/cros_utils/tiny_render_test.py
index 114a1796..9c4d750b 100755
--- a/cros_utils/tiny_render_test.py
+++ b/cros_utils/tiny_render_test.py
@@ -1,12 +1,11 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
-# Copyright 2020 The Chromium OS Authors. All rights reserved.
+# Copyright 2020 The ChromiumOS Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Tests for tiny_render."""
-from __future__ import print_function
import unittest
@@ -18,160 +17,184 @@ import tiny_render
# shipped alongside the plain-text, the hope is that people won't have to
# subject themselves to reading the HTML often. :)
class Test(unittest.TestCase):
- """Tests for tiny_render."""
-
- def test_bold(self):
- pieces = [
- tiny_render.Bold('hello'),
- ', ',
- tiny_render.Bold(['world', '!']),
- ]
-
- self.assertEqual(
- tiny_render.render_text_pieces(pieces),
- '**hello**, **world!**',
- )
-
- self.assertEqual(
- tiny_render.render_html_pieces(pieces),
- '<b>hello</b>, <b>world!</b>',
- )
-
- def test_line_break(self):
- pieces = [
- 'hello',
- tiny_render.line_break,
- ['world', '!'],
- ]
-
- self.assertEqual(
- tiny_render.render_text_pieces(pieces),
- 'hello\nworld!',
- )
-
- self.assertEqual(
- tiny_render.render_html_pieces(pieces),
- 'hello<br />\nworld!',
- )
-
- def test_linkification(self):
- pieces = [
- 'hello ',
- tiny_render.Link(href='https://google.com', inner='world!'),
- ]
-
- self.assertEqual(
- tiny_render.render_text_pieces(pieces),
- 'hello world!',
- )
-
- self.assertEqual(
- tiny_render.render_html_pieces(pieces),
- 'hello <a href="https://google.com">world!</a>',
- )
-
- def test_unordered_list(self):
- pieces = [
- 'hello:',
- tiny_render.UnorderedList([
- 'world',
- 'w o r l d',
- ]),
- ]
-
- self.assertEqual(
- tiny_render.render_text_pieces(pieces),
- '\n'.join((
- 'hello:',
- ' - world',
- ' - w o r l d',
- )),
- )
-
- self.assertEqual(
- tiny_render.render_html_pieces(pieces),
- '\n'.join((
- 'hello:<ul>',
- '<li>world</li>',
- '<li>w o r l d</li>',
- '</ul>',
- '',
- )),
- )
-
- def test_nested_unordered_list(self):
- pieces = [
- 'hello:',
- tiny_render.UnorderedList([
- 'world',
- ['and more:', tiny_render.UnorderedList(['w o r l d'])],
- 'world2',
- ])
- ]
-
- self.assertEqual(
- tiny_render.render_text_pieces(pieces),
- '\n'.join((
- 'hello:',
- ' - world',
- ' - and more:',
- ' - w o r l d',
- ' - world2',
- )),
- )
-
- self.assertEqual(
- tiny_render.render_html_pieces(pieces),
- '\n'.join((
- 'hello:<ul>',
- '<li>world</li>',
- '<li>and more:<ul>',
- '<li>w o r l d</li>',
- '</ul>',
- '</li>',
- '<li>world2</li>',
- '</ul>',
- '',
- )),
- )
-
- def test_switch(self):
- pieces = ['hello ', tiny_render.Switch(text='text', html='html')]
- self.assertEqual(tiny_render.render_text_pieces(pieces), 'hello text')
- self.assertEqual(tiny_render.render_html_pieces(pieces), 'hello html')
-
- def test_golden(self):
- pieces = [
- 'hello',
- tiny_render.UnorderedList([
- tiny_render.Switch(text='text', html=tiny_render.Bold('html')),
- 'the',
- tiny_render.Bold('sun'),
- ]),
- tiny_render.line_break,
- ['is', ' out!'],
- ]
-
- self.assertEqual(
- tiny_render.render_text_pieces(pieces), '\n'.join((
- 'hello',
- ' - text',
- ' - the',
- ' - **sun**',
- 'is out!',
- )))
-
- self.assertEqual(
- tiny_render.render_html_pieces(pieces), '\n'.join((
- 'hello<ul>',
- '<li><b>html</b></li>',
- '<li>the</li>',
- '<li><b>sun</b></li>',
- '</ul>',
- '<br />',
- 'is out!',
- )))
-
-
-if __name__ == '__main__':
- unittest.main()
+ """Tests for tiny_render."""
+
+ def test_bold(self):
+ pieces = [
+ tiny_render.Bold("hello"),
+ ", ",
+ tiny_render.Bold(["world", "!"]),
+ ]
+
+ self.assertEqual(
+ tiny_render.render_text_pieces(pieces),
+ "**hello**, **world!**",
+ )
+
+ self.assertEqual(
+ tiny_render.render_html_pieces(pieces),
+ "<b>hello</b>, <b>world!</b>",
+ )
+
+ def test_line_break(self):
+ pieces = [
+ "hello",
+ tiny_render.line_break,
+ ["world", "!"],
+ ]
+
+ self.assertEqual(
+ tiny_render.render_text_pieces(pieces),
+ "hello\nworld!",
+ )
+
+ self.assertEqual(
+ tiny_render.render_html_pieces(pieces),
+ "hello<br />\nworld!",
+ )
+
+ def test_linkification(self):
+ pieces = [
+ "hello ",
+ tiny_render.Link(href="https://google.com", inner="world!"),
+ ]
+
+ self.assertEqual(
+ tiny_render.render_text_pieces(pieces),
+ "hello world!",
+ )
+
+ self.assertEqual(
+ tiny_render.render_html_pieces(pieces),
+ 'hello <a href="https://google.com">world!</a>',
+ )
+
+ def test_unordered_list(self):
+ pieces = [
+ "hello:",
+ tiny_render.UnorderedList(
+ [
+ "world",
+ "w o r l d",
+ ]
+ ),
+ ]
+
+ self.assertEqual(
+ tiny_render.render_text_pieces(pieces),
+ "\n".join(
+ (
+ "hello:",
+ " - world",
+ " - w o r l d",
+ )
+ ),
+ )
+
+ self.assertEqual(
+ tiny_render.render_html_pieces(pieces),
+ "\n".join(
+ (
+ "hello:<ul>",
+ "<li>world</li>",
+ "<li>w o r l d</li>",
+ "</ul>",
+ "",
+ )
+ ),
+ )
+
+ def test_nested_unordered_list(self):
+ pieces = [
+ "hello:",
+ tiny_render.UnorderedList(
+ [
+ "world",
+ ["and more:", tiny_render.UnorderedList(["w o r l d"])],
+ "world2",
+ ]
+ ),
+ ]
+
+ self.assertEqual(
+ tiny_render.render_text_pieces(pieces),
+ "\n".join(
+ (
+ "hello:",
+ " - world",
+ " - and more:",
+ " - w o r l d",
+ " - world2",
+ )
+ ),
+ )
+
+ self.assertEqual(
+ tiny_render.render_html_pieces(pieces),
+ "\n".join(
+ (
+ "hello:<ul>",
+ "<li>world</li>",
+ "<li>and more:<ul>",
+ "<li>w o r l d</li>",
+ "</ul>",
+ "</li>",
+ "<li>world2</li>",
+ "</ul>",
+ "",
+ )
+ ),
+ )
+
+ def test_switch(self):
+ pieces = ["hello ", tiny_render.Switch(text="text", html="html")]
+ self.assertEqual(tiny_render.render_text_pieces(pieces), "hello text")
+ self.assertEqual(tiny_render.render_html_pieces(pieces), "hello html")
+
+ def test_golden(self):
+ pieces = [
+ "hello",
+ tiny_render.UnorderedList(
+ [
+ tiny_render.Switch(
+ text="text", html=tiny_render.Bold("html")
+ ),
+ "the",
+ tiny_render.Bold("sun"),
+ ]
+ ),
+ tiny_render.line_break,
+ ["is", " out!"],
+ ]
+
+ self.assertEqual(
+ tiny_render.render_text_pieces(pieces),
+ "\n".join(
+ (
+ "hello",
+ " - text",
+ " - the",
+ " - **sun**",
+ "is out!",
+ )
+ ),
+ )
+
+ self.assertEqual(
+ tiny_render.render_html_pieces(pieces),
+ "\n".join(
+ (
+ "hello<ul>",
+ "<li><b>html</b></li>",
+ "<li>the</li>",
+ "<li><b>sun</b></li>",
+ "</ul>",
+ "<br />",
+ "is out!",
+ )
+ ),
+ )
+
+
+if __name__ == "__main__":
+ unittest.main()
diff --git a/cros_utils/toolchain_utils.sh b/cros_utils/toolchain_utils.sh
index 5e9a2a32..b5403bbe 100644
--- a/cros_utils/toolchain_utils.sh
+++ b/cros_utils/toolchain_utils.sh
@@ -1,5 +1,5 @@
#!/bin/bash
-# Copyright (c) 2012 The Chromium OS Authors. All rights reserved.
+# Copyright 2012 The ChromiumOS Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.