Diffstat (limited to 'llvm_tools')
-rw-r--r--  llvm_tools/README.md                                    43
-rwxr-xr-x  llvm_tools/auto_llvm_bisection.py                      276
-rwxr-xr-x  llvm_tools/auto_llvm_bisection_unittest.py             489
-rwxr-xr-x  llvm_tools/bisect_clang_crashes.py                     240
-rwxr-xr-x  llvm_tools/bisect_clang_crashes_unittest.py            156
-rwxr-xr-x  llvm_tools/check_clang_diags.py                        223
-rwxr-xr-x  llvm_tools/check_clang_diags_test.py                   110
-rwxr-xr-x  llvm_tools/chroot.py                                   113
-rwxr-xr-x  llvm_tools/chroot_unittest.py                           75
-rwxr-xr-x  llvm_tools/copy_helpers_to_chromiumos_overlay.py        76
-rwxr-xr-x  llvm_tools/custom_script_example.py                    114
-rw-r--r--  llvm_tools/failure_modes.py                             19
-rwxr-xr-x  llvm_tools/fetch_cros_sdk_rolls.py                     158
-rwxr-xr-x  llvm_tools/get_llvm_hash.py                            589
-rwxr-xr-x  llvm_tools/get_llvm_hash_unittest.py                   250
-rwxr-xr-x  llvm_tools/get_upstream_patch.py                       929
-rwxr-xr-x  llvm_tools/git.py                                      189
-rwxr-xr-x  llvm_tools/git_llvm_rev.py                             647
-rwxr-xr-x  llvm_tools/git_llvm_rev_test.py                        235
-rwxr-xr-x  llvm_tools/git_unittest.py                             261
-rwxr-xr-x  llvm_tools/llvm_bisection.py                           757
-rwxr-xr-x  llvm_tools/llvm_bisection_unittest.py                  998
-rwxr-xr-x  llvm_tools/llvm_local_bisection.sh                     109
-rwxr-xr-x  llvm_tools/llvm_patch_management.py                    276
-rwxr-xr-x  llvm_tools/llvm_patch_management_unittest.py           307
-rw-r--r--  llvm_tools/llvm_project.py                              84
-rwxr-xr-x  llvm_tools/modify_a_tryjob.py                          601
-rwxr-xr-x  llvm_tools/modify_a_tryjob_unittest.py                 797
-rwxr-xr-x  llvm_tools/nightly_revert_checker.py                   770
-rwxr-xr-x  llvm_tools/nightly_revert_checker_test.py              377
-rwxr-xr-x  llvm_tools/patch_manager.py                           1010
-rwxr-xr-x  llvm_tools/patch_manager_unittest.py                  1141
-rw-r--r--  llvm_tools/patch_sync/src/android_utils.rs               4
-rw-r--r--  llvm_tools/patch_sync/src/main.rs                       75
-rw-r--r--  llvm_tools/patch_sync/src/patch_parsing.rs             201
-rw-r--r--  llvm_tools/patch_sync/src/version_control.rs            15
-rw-r--r--  llvm_tools/patch_utils.py                              594
-rwxr-xr-x  llvm_tools/patch_utils_unittest.py                     381
-rwxr-xr-x  llvm_tools/revert_checker.py                           409
-rw-r--r--  llvm_tools/subprocess_helpers.py                        47
-rw-r--r--  llvm_tools/test_helpers.py                              97
-rwxr-xr-x  llvm_tools/update_chromeos_llvm_hash.py               1194
-rwxr-xr-x  llvm_tools/update_chromeos_llvm_hash_unittest.py      2034
-rwxr-xr-x  llvm_tools/update_packages_and_run_tests.py            791
-rwxr-xr-x  llvm_tools/update_packages_and_run_tests_unittest.py   933
-rwxr-xr-x  llvm_tools/update_tryjob_status.py                     459
-rwxr-xr-x  llvm_tools/update_tryjob_status_unittest.py            973
-rwxr-xr-x  llvm_tools/upload_lexan_crashes_to_forcey.py           412
-rwxr-xr-x  llvm_tools/upload_lexan_crashes_to_forcey_test.py      254
49 files changed, 11347 insertions, 9945 deletions
diff --git a/llvm_tools/README.md b/llvm_tools/README.md
index 74fad6c9..e2ef34f1 100644
--- a/llvm_tools/README.md
+++ b/llvm_tools/README.md
@@ -19,10 +19,9 @@ password)**
### Usage
This script is used for updating a package's LLVM hash (sys-devel/llvm,
-sys-libs/compiler-rt, sys-libs/libcxx, sys-libs/libcxxabi, and
-sys-libs/llvm-libunwind) and then run tests after updating the git hash.
-There are three ways to test the change, including starting tryjobs,
-recipe builders or using cq+1.
+sys-libs/compiler-rt, sys-libs/libcxx, and sys-libs/llvm-libunwind)
+and then running tests after updating the git hash. There are three ways to
+test the change: starting tryjobs, using recipe builders, or setting CQ+1.
An example when this script should be run is when certain boards would like
to be tested with the updated `LLVM_NEXT_HASH`.
@@ -118,41 +117,12 @@ For example, to create a roll CL to the git hash of revision 367622:
```
$ ./update_chromeos_llvm_hash.py \
--update_packages sys-devel/llvm sys-libs/compiler-rt \
- sys-libs/libcxx sys-libs/libcxxabi sys-libs/llvm-libunwind \
+ sys-libs/libcxx sys-libs/llvm-libunwind \
'dev-util/lldb-server' \
--llvm_version 367622 \
--failure_mode disable_patches
```
-## `llvm_patch_management.py`
-
-### Usage
-
-This script is used to test whether a newly added patch in a package's patch
-metadata file would apply successfully. The script is also used to make sure
-the patches of a package applied successfully, failed, etc., depending on the
-failure mode specified.
-
-An example of using this script is when multiple packages would like to be
-tested when a new patch was added to their patch metadata file.
-
-For example:
-
-```
-$ ./llvm_patch_management.py \
- --packages sys-devel/llvm sys-libs/compiler-rt \
- --failure_mode continue
-```
-
-The above example tests sys-devel/llvm and sys-libs/compiler-rt patch metadata
-file with the failure mode `continue`.
-
-For help with the command line arguments of the script, run:
-
-```
-$ ./llvm_patch_management.py --help
-```
-
## `patch_manager.py`
### Usage
@@ -172,7 +142,6 @@ For example, to see all the failed (if any) patches:
$ ./patch_manager.py \
--svn_version 367622 \
--patch_metadata_file /abs/path/to/patch/file \
- --filesdir_path /abs/path/to/$FILESDIR \
--src_path /abs/path/to/src/tree \
--failure_mode continue
```
@@ -183,7 +152,6 @@ For example, to disable all patches that failed to apply:
$ ./patch_manager.py \
--svn_version 367622 \
--patch_metadata_file /abs/path/to/patch/file \
- --filesdir_path /abs/path/to/$FILESDIR \
--src_path /abs/path/to/src/tree \
--failure_mode disable_patches
```
@@ -194,7 +162,6 @@ For example, to remove all patches that no longer apply:
$ ./patch_manager.py \
--svn_version 367622 \
--patch_metadata_file /abs/path/to/patch/file \
- --filesdir_path /abs/path/to/$FILESDIR \
--src_path /abs/path/to/src/tree \
--failure_mode remove_patches
```
@@ -205,7 +172,6 @@ For example, to bisect a failing patch and stop at the first bisected patch:
$ ./patch_manager.py \
--svn_version 367622 \
--patch_metadata_file /abs/path/to/patch/file \
- --filesdir_path /abs/path/to/$FILESDIR \
--src_path /abs/path/to/src/tree \
--failure_mode bisect_patches \
--good_svn_version 365631
@@ -218,7 +184,6 @@ the failed patches:
$ ./patch_manager.py \
--svn_version 367622 \
--patch_metadata_file /abs/path/to/patch/file \
- --filesdir_path /abs/path/to/$FILESDIR \
--src_path /abs/path/to/src/tree \
--failure_mode bisect_patches \
--good_svn_version 365631 \
diff --git a/llvm_tools/auto_llvm_bisection.py b/llvm_tools/auto_llvm_bisection.py
index 7e8fb1dd..3640abae 100755
--- a/llvm_tools/auto_llvm_bisection.py
+++ b/llvm_tools/auto_llvm_bisection.py
@@ -1,12 +1,11 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
-# Copyright 2019 The Chromium OS Authors. All rights reserved.
+# Copyright 2019 The ChromiumOS Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Performs bisection on LLVM based off a .JSON file."""
-from __future__ import print_function
import enum
import json
@@ -17,10 +16,11 @@ import time
import traceback
import chroot
-from llvm_bisection import BisectionExitStatus
import llvm_bisection
+from llvm_bisection import BisectionExitStatus
import update_tryjob_status
+
# Used to re-try for 'llvm_bisection.py' to attempt to launch more tryjobs.
BISECTION_RETRY_TIME_SECS = 10 * 60
@@ -42,145 +42,167 @@ POLLING_LIMIT_SECS = 18 * 60 * 60
class BuilderStatus(enum.Enum):
- """Actual values given via 'cros buildresult'."""
+ """Actual values given via 'cros buildresult'."""
- PASS = 'pass'
- FAIL = 'fail'
- RUNNING = 'running'
+ PASS = "pass"
+ FAIL = "fail"
+ RUNNING = "running"
builder_status_mapping = {
BuilderStatus.PASS.value: update_tryjob_status.TryjobStatus.GOOD.value,
BuilderStatus.FAIL.value: update_tryjob_status.TryjobStatus.BAD.value,
- BuilderStatus.RUNNING.value: update_tryjob_status.TryjobStatus.PENDING.value
+ BuilderStatus.RUNNING.value: update_tryjob_status.TryjobStatus.PENDING.value,
}
def GetBuildResult(chroot_path, buildbucket_id):
- """Returns the conversion of the result of 'cros buildresult'."""
-
- # Calls 'cros buildresult' to get the status of the tryjob.
- try:
- tryjob_json = subprocess.check_output(
- [
- 'cros_sdk', '--', 'cros', 'buildresult', '--buildbucket-id',
- str(buildbucket_id), '--report', 'json'
- ],
- cwd=chroot_path,
- stderr=subprocess.STDOUT,
- encoding='UTF-8',
- )
- except subprocess.CalledProcessError as err:
- if 'No build found. Perhaps not started' not in err.output:
- raise
- return None
-
- tryjob_content = json.loads(tryjob_json)
-
- build_result = str(tryjob_content['%d' % buildbucket_id]['status'])
-
- # The string returned by 'cros buildresult' might not be in the mapping.
- if build_result not in builder_status_mapping:
- raise ValueError('"cros buildresult" return value is invalid: %s' %
- build_result)
-
- return builder_status_mapping[build_result]
+ """Returns the conversion of the result of 'cros buildresult'."""
+
+ # Calls 'cros buildresult' to get the status of the tryjob.
+ try:
+ tryjob_json = subprocess.check_output(
+ [
+ "cros_sdk",
+ "--",
+ "cros",
+ "buildresult",
+ "--buildbucket-id",
+ str(buildbucket_id),
+ "--report",
+ "json",
+ ],
+ cwd=chroot_path,
+ stderr=subprocess.STDOUT,
+ encoding="UTF-8",
+ )
+ except subprocess.CalledProcessError as err:
+ if "No build found. Perhaps not started" not in err.output:
+ raise
+ return None
+
+ tryjob_content = json.loads(tryjob_json)
+
+ build_result = str(tryjob_content["%d" % buildbucket_id]["status"])
+
+ # The string returned by 'cros buildresult' might not be in the mapping.
+ if build_result not in builder_status_mapping:
+ raise ValueError(
+ '"cros buildresult" return value is invalid: %s' % build_result
+ )
+
+ return builder_status_mapping[build_result]
def main():
- """Bisects LLVM using the result of `cros buildresult` of each tryjob.
-
- Raises:
- AssertionError: The script was run inside the chroot.
- """
+ """Bisects LLVM using the result of `cros buildresult` of each tryjob.
- chroot.VerifyOutsideChroot()
+ Raises:
+ AssertionError: The script was run inside the chroot.
+ """
- args_output = llvm_bisection.GetCommandLineArgs()
+ chroot.VerifyOutsideChroot()
- if os.path.isfile(args_output.last_tested):
- print('Resuming bisection for %s' % args_output.last_tested)
- else:
- print('Starting a new bisection for %s' % args_output.last_tested)
+ args_output = llvm_bisection.GetCommandLineArgs()
- while True:
- # Update the status of existing tryjobs
if os.path.isfile(args_output.last_tested):
- update_start_time = time.time()
- with open(args_output.last_tested) as json_file:
- json_dict = json.load(json_file)
- while True:
- print('\nAttempting to update all tryjobs whose "status" is '
- '"pending":')
- print('-' * 40)
-
- completed = True
- for tryjob in json_dict['jobs']:
- if tryjob[
- 'status'] == update_tryjob_status.TryjobStatus.PENDING.value:
- status = GetBuildResult(args_output.chroot_path,
- tryjob['buildbucket_id'])
- if status:
- tryjob['status'] = status
- else:
- completed = False
-
- print('-' * 40)
-
- # Proceed to the next step if all the existing tryjobs have completed.
- if completed:
- break
-
- delta_time = time.time() - update_start_time
-
- if delta_time > POLLING_LIMIT_SECS:
- # Something is wrong with updating the tryjobs's 'status' via
- # `cros buildresult` (e.g. network issue, etc.).
- sys.exit('Failed to update pending tryjobs.')
-
- print('-' * 40)
- print('Sleeping for %d minutes.' % (POLL_RETRY_TIME_SECS // 60))
- time.sleep(POLL_RETRY_TIME_SECS)
-
- # There should always be update from the tryjobs launched in the
- # last iteration.
- temp_filename = '%s.new' % args_output.last_tested
- with open(temp_filename, 'w') as temp_file:
- json.dump(json_dict, temp_file, indent=4, separators=(',', ': '))
- os.rename(temp_filename, args_output.last_tested)
-
- # Launch more tryjobs.
- for cur_try in range(1, BISECTION_ATTEMPTS + 1):
- try:
- print('\nAttempting to launch more tryjobs if possible:')
- print('-' * 40)
-
- bisection_ret = llvm_bisection.main(args_output)
-
- print('-' * 40)
-
- # Stop if the bisection has completed.
- if bisection_ret == BisectionExitStatus.BISECTION_COMPLETE.value:
- sys.exit(0)
-
- # Successfully launched more tryjobs.
- break
- except Exception:
- traceback.print_exc()
-
- print('-' * 40)
-
- # Exceeded the number of times to launch more tryjobs.
- if cur_try == BISECTION_ATTEMPTS:
- sys.exit('Unable to continue bisection.')
-
- num_retries_left = BISECTION_ATTEMPTS - cur_try
-
- print('Retries left to continue bisection %d.' % num_retries_left)
-
- print('Sleeping for %d minutes.' % (BISECTION_RETRY_TIME_SECS // 60))
- time.sleep(BISECTION_RETRY_TIME_SECS)
-
-
-if __name__ == '__main__':
- main()
+ print("Resuming bisection for %s" % args_output.last_tested)
+ else:
+ print("Starting a new bisection for %s" % args_output.last_tested)
+
+ while True:
+ # Update the status of existing tryjobs
+ if os.path.isfile(args_output.last_tested):
+ update_start_time = time.time()
+ with open(args_output.last_tested) as json_file:
+ json_dict = json.load(json_file)
+ while True:
+ print(
+ '\nAttempting to update all tryjobs whose "status" is '
+ '"pending":'
+ )
+ print("-" * 40)
+
+ completed = True
+ for tryjob in json_dict["jobs"]:
+ if (
+ tryjob["status"]
+ == update_tryjob_status.TryjobStatus.PENDING.value
+ ):
+ status = GetBuildResult(
+ args_output.chroot_path, tryjob["buildbucket_id"]
+ )
+ if status:
+ tryjob["status"] = status
+ else:
+ completed = False
+
+ print("-" * 40)
+
+ # Proceed to the next step if all the existing tryjobs have completed.
+ if completed:
+ break
+
+ delta_time = time.time() - update_start_time
+
+ if delta_time > POLLING_LIMIT_SECS:
+                    # Something is wrong with updating the tryjobs' 'status'
+                    # via `cros buildresult` (e.g. network issue, etc.).
+ sys.exit("Failed to update pending tryjobs.")
+
+ print("-" * 40)
+ print("Sleeping for %d minutes." % (POLL_RETRY_TIME_SECS // 60))
+ time.sleep(POLL_RETRY_TIME_SECS)
+
+            # There should always be an update from the tryjobs launched in
+            # the last iteration.
+ temp_filename = "%s.new" % args_output.last_tested
+ with open(temp_filename, "w") as temp_file:
+ json.dump(
+ json_dict, temp_file, indent=4, separators=(",", ": ")
+ )
+ os.rename(temp_filename, args_output.last_tested)
+
+ # Launch more tryjobs.
+ for cur_try in range(1, BISECTION_ATTEMPTS + 1):
+ try:
+ print("\nAttempting to launch more tryjobs if possible:")
+ print("-" * 40)
+
+ bisection_ret = llvm_bisection.main(args_output)
+
+ print("-" * 40)
+
+ # Stop if the bisection has completed.
+ if (
+ bisection_ret
+ == BisectionExitStatus.BISECTION_COMPLETE.value
+ ):
+ sys.exit(0)
+
+ # Successfully launched more tryjobs.
+ break
+ except Exception:
+ traceback.print_exc()
+
+ print("-" * 40)
+
+ # Exceeded the number of times to launch more tryjobs.
+ if cur_try == BISECTION_ATTEMPTS:
+ sys.exit("Unable to continue bisection.")
+
+ num_retries_left = BISECTION_ATTEMPTS - cur_try
+
+ print(
+ "Retries left to continue bisection %d." % num_retries_left
+ )
+
+ print(
+ "Sleeping for %d minutes."
+ % (BISECTION_RETRY_TIME_SECS // 60)
+ )
+ time.sleep(BISECTION_RETRY_TIME_SECS)
+
+
+if __name__ == "__main__":
+ main()
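
For orientation, the `--last_tested` file that this loop polls and atomically rewrites (via the `.new` temp file plus `os.rename`) is plain JSON; its shape is pinned down by the unit tests in the next file. A minimal sketch, with illustrative revision numbers and buildbucket ID:

```python
# Illustrative shape of the --last_tested JSON state file used by
# auto_llvm_bisection.main(). All values below are examples only.
example_state = {
    "start": 369410,  # one end of the revision range being bisected
    "end": 369420,  # the other end of the revision range
    "jobs": [
        {
            "buildbucket_id": 12345,
            "rev": 369411,
            # Updated via `cros buildresult` as the tryjob finishes.
            "status": "pending",
        }
    ],
}
```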
diff --git a/llvm_tools/auto_llvm_bisection_unittest.py b/llvm_tools/auto_llvm_bisection_unittest.py
index 07c0e715..c70ddee5 100755
--- a/llvm_tools/auto_llvm_bisection_unittest.py
+++ b/llvm_tools/auto_llvm_bisection_unittest.py
@@ -1,12 +1,11 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
-# Copyright 2019 The Chromium OS Authors. All rights reserved.
+# Copyright 2019 The ChromiumOS Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Tests for auto bisection of LLVM."""
-from __future__ import print_function
import json
import os
@@ -24,230 +23,268 @@ import update_tryjob_status
class AutoLLVMBisectionTest(unittest.TestCase):
- """Unittests for auto bisection of LLVM."""
-
- @mock.patch.object(chroot, 'VerifyOutsideChroot', return_value=True)
- @mock.patch.object(
- llvm_bisection,
- 'GetCommandLineArgs',
- return_value=test_helpers.ArgsOutputTest())
- @mock.patch.object(time, 'sleep')
- @mock.patch.object(traceback, 'print_exc')
- @mock.patch.object(llvm_bisection, 'main')
- @mock.patch.object(os.path, 'isfile')
- @mock.patch.object(auto_llvm_bisection, 'open')
- @mock.patch.object(json, 'load')
- @mock.patch.object(auto_llvm_bisection, 'GetBuildResult')
- @mock.patch.object(os, 'rename')
- def testAutoLLVMBisectionPassed(
- self,
- # pylint: disable=unused-argument
- mock_rename,
- mock_get_build_result,
- mock_json_load,
- # pylint: disable=unused-argument
- mock_open,
- mock_isfile,
- mock_llvm_bisection,
- mock_traceback,
- mock_sleep,
- mock_get_args,
- mock_outside_chroot):
-
- mock_isfile.side_effect = [False, False, True, True]
- mock_llvm_bisection.side_effect = [
- 0,
- ValueError('Failed to launch more tryjobs.'),
- llvm_bisection.BisectionExitStatus.BISECTION_COMPLETE.value
- ]
- mock_json_load.return_value = {
- 'start':
- 369410,
- 'end':
- 369420,
- 'jobs': [{
- 'buildbucket_id': 12345,
- 'rev': 369411,
- 'status': update_tryjob_status.TryjobStatus.PENDING.value,
- }]
- }
- mock_get_build_result.return_value = (
- update_tryjob_status.TryjobStatus.GOOD.value)
-
- # Verify the excpetion is raised when successfully found the bad revision.
- # Uses `sys.exit(0)` to indicate success.
- with self.assertRaises(SystemExit) as err:
- auto_llvm_bisection.main()
-
- self.assertEqual(err.exception.code, 0)
-
- mock_outside_chroot.assert_called_once()
- mock_get_args.assert_called_once()
- self.assertEqual(mock_isfile.call_count, 3)
- self.assertEqual(mock_llvm_bisection.call_count, 3)
- mock_traceback.assert_called_once()
- mock_sleep.assert_called_once()
-
- @mock.patch.object(chroot, 'VerifyOutsideChroot', return_value=True)
- @mock.patch.object(time, 'sleep')
- @mock.patch.object(traceback, 'print_exc')
- @mock.patch.object(llvm_bisection, 'main')
- @mock.patch.object(os.path, 'isfile')
- @mock.patch.object(
- llvm_bisection,
- 'GetCommandLineArgs',
- return_value=test_helpers.ArgsOutputTest())
- def testFailedToStartBisection(self, mock_get_args, mock_isfile,
- mock_llvm_bisection, mock_traceback,
- mock_sleep, mock_outside_chroot):
-
- mock_isfile.return_value = False
- mock_llvm_bisection.side_effect = ValueError(
- 'Failed to launch more tryjobs.')
-
- # Verify the exception is raised when the number of attempts to launched
- # more tryjobs is exceeded, so unable to continue
- # bisection.
- with self.assertRaises(SystemExit) as err:
- auto_llvm_bisection.main()
-
- self.assertEqual(err.exception.code, 'Unable to continue bisection.')
-
- mock_outside_chroot.assert_called_once()
- mock_get_args.assert_called_once()
- self.assertEqual(mock_isfile.call_count, 2)
- self.assertEqual(mock_llvm_bisection.call_count, 3)
- self.assertEqual(mock_traceback.call_count, 3)
- self.assertEqual(mock_sleep.call_count, 2)
-
- @mock.patch.object(chroot, 'VerifyOutsideChroot', return_value=True)
- @mock.patch.object(
- llvm_bisection,
- 'GetCommandLineArgs',
- return_value=test_helpers.ArgsOutputTest())
- @mock.patch.object(time, 'time')
- @mock.patch.object(time, 'sleep')
- @mock.patch.object(os.path, 'isfile')
- @mock.patch.object(auto_llvm_bisection, 'open')
- @mock.patch.object(json, 'load')
- @mock.patch.object(auto_llvm_bisection, 'GetBuildResult')
- def testFailedToUpdatePendingTryJobs(
- self,
- mock_get_build_result,
- mock_json_load,
- # pylint: disable=unused-argument
- mock_open,
- mock_isfile,
- mock_sleep,
- mock_time,
- mock_get_args,
- mock_outside_chroot):
-
- # Simulate behavior of `time.time()` for time passed.
- @test_helpers.CallCountsToMockFunctions
- def MockTimePassed(call_count):
- if call_count < 3:
- return call_count
-
- assert False, 'Called `time.time()` more than expected.'
-
- mock_isfile.return_value = True
- mock_json_load.return_value = {
- 'start':
- 369410,
- 'end':
- 369420,
- 'jobs': [{
- 'buildbucket_id': 12345,
- 'rev': 369411,
- 'status': update_tryjob_status.TryjobStatus.PENDING.value,
- }]
- }
- mock_get_build_result.return_value = None
- mock_time.side_effect = MockTimePassed
- # Reduce the polling limit for the test case to terminate faster.
- auto_llvm_bisection.POLLING_LIMIT_SECS = 1
-
- # Verify the exception is raised when unable to update tryjobs whose
- # 'status' value is 'pending'.
- with self.assertRaises(SystemExit) as err:
- auto_llvm_bisection.main()
-
- self.assertEqual(err.exception.code, 'Failed to update pending tryjobs.')
-
- mock_outside_chroot.assert_called_once()
- mock_get_args.assert_called_once()
- self.assertEqual(mock_isfile.call_count, 2)
- mock_sleep.assert_called_once()
- self.assertEqual(mock_time.call_count, 3)
-
- @mock.patch.object(subprocess, 'check_output')
- def testGetBuildResult(self, mock_chroot_command):
- buildbucket_id = 192
- status = auto_llvm_bisection.BuilderStatus.PASS.value
- tryjob_contents = {buildbucket_id: {'status': status}}
- mock_chroot_command.return_value = json.dumps(tryjob_contents)
- chroot_path = '/some/path/to/chroot'
-
- self.assertEqual(
- auto_llvm_bisection.GetBuildResult(chroot_path, buildbucket_id),
- update_tryjob_status.TryjobStatus.GOOD.value)
-
- mock_chroot_command.assert_called_once_with(
- [
- 'cros_sdk', '--', 'cros', 'buildresult', '--buildbucket-id',
- str(buildbucket_id), '--report', 'json'
- ],
- cwd='/some/path/to/chroot',
- stderr=subprocess.STDOUT,
- encoding='UTF-8',
- )
+ """Unittests for auto bisection of LLVM."""
- @mock.patch.object(subprocess, 'check_output')
- def testGetBuildResultPassedWithUnstartedTryjob(self, mock_chroot_command):
- buildbucket_id = 192
- chroot_path = '/some/path/to/chroot'
- mock_chroot_command.side_effect = subprocess.CalledProcessError(
- returncode=1, cmd=[], output='No build found. Perhaps not started')
- auto_llvm_bisection.GetBuildResult(chroot_path, buildbucket_id)
- mock_chroot_command.assert_called_once_with(
- [
- 'cros_sdk', '--', 'cros', 'buildresult', '--buildbucket-id', '192',
- '--report', 'json'
- ],
- cwd=chroot_path,
- stderr=subprocess.STDOUT,
- encoding='UTF-8',
+ @mock.patch.object(chroot, "VerifyOutsideChroot", return_value=True)
+ @mock.patch.object(
+ llvm_bisection,
+ "GetCommandLineArgs",
+ return_value=test_helpers.ArgsOutputTest(),
)
-
- @mock.patch.object(subprocess, 'check_output')
- def testGetBuildReusultFailedWithInvalidBuildStatus(self,
- mock_chroot_command):
- chroot_path = '/some/path/to/chroot'
- buildbucket_id = 50
- invalid_build_status = 'querying'
- tryjob_contents = {buildbucket_id: {'status': invalid_build_status}}
- mock_chroot_command.return_value = json.dumps(tryjob_contents)
-
- # Verify the exception is raised when the return value of `cros buildresult`
- # is not in the `builder_status_mapping`.
- with self.assertRaises(ValueError) as err:
- auto_llvm_bisection.GetBuildResult(chroot_path, buildbucket_id)
-
- self.assertEqual(
- str(err.exception),
- '"cros buildresult" return value is invalid: %s' % invalid_build_status)
-
- mock_chroot_command.assert_called_once_with(
- [
- 'cros_sdk', '--', 'cros', 'buildresult', '--buildbucket-id',
- str(buildbucket_id), '--report', 'json'
- ],
- cwd=chroot_path,
- stderr=subprocess.STDOUT,
- encoding='UTF-8',
+ @mock.patch.object(time, "sleep")
+ @mock.patch.object(traceback, "print_exc")
+ @mock.patch.object(llvm_bisection, "main")
+ @mock.patch.object(os.path, "isfile")
+ @mock.patch.object(auto_llvm_bisection, "open")
+ @mock.patch.object(json, "load")
+ @mock.patch.object(auto_llvm_bisection, "GetBuildResult")
+ @mock.patch.object(os, "rename")
+ def testAutoLLVMBisectionPassed(
+ self,
+ # pylint: disable=unused-argument
+ mock_rename,
+ mock_get_build_result,
+ mock_json_load,
+ # pylint: disable=unused-argument
+ mock_open,
+ mock_isfile,
+ mock_llvm_bisection,
+ mock_traceback,
+ mock_sleep,
+ mock_get_args,
+ mock_outside_chroot,
+ ):
+
+ mock_isfile.side_effect = [False, False, True, True]
+ mock_llvm_bisection.side_effect = [
+ 0,
+ ValueError("Failed to launch more tryjobs."),
+ llvm_bisection.BisectionExitStatus.BISECTION_COMPLETE.value,
+ ]
+ mock_json_load.return_value = {
+ "start": 369410,
+ "end": 369420,
+ "jobs": [
+ {
+ "buildbucket_id": 12345,
+ "rev": 369411,
+ "status": update_tryjob_status.TryjobStatus.PENDING.value,
+ }
+ ],
+ }
+ mock_get_build_result.return_value = (
+ update_tryjob_status.TryjobStatus.GOOD.value
+ )
+
+        # Verify the exception is raised once the bad revision is found.
+ # Uses `sys.exit(0)` to indicate success.
+ with self.assertRaises(SystemExit) as err:
+ auto_llvm_bisection.main()
+
+ self.assertEqual(err.exception.code, 0)
+
+ mock_outside_chroot.assert_called_once()
+ mock_get_args.assert_called_once()
+ self.assertEqual(mock_isfile.call_count, 3)
+ self.assertEqual(mock_llvm_bisection.call_count, 3)
+ mock_traceback.assert_called_once()
+ mock_sleep.assert_called_once()
+
+ @mock.patch.object(chroot, "VerifyOutsideChroot", return_value=True)
+ @mock.patch.object(time, "sleep")
+ @mock.patch.object(traceback, "print_exc")
+ @mock.patch.object(llvm_bisection, "main")
+ @mock.patch.object(os.path, "isfile")
+ @mock.patch.object(
+ llvm_bisection,
+ "GetCommandLineArgs",
+ return_value=test_helpers.ArgsOutputTest(),
)
-
-
-if __name__ == '__main__':
- unittest.main()
+ def testFailedToStartBisection(
+ self,
+ mock_get_args,
+ mock_isfile,
+ mock_llvm_bisection,
+ mock_traceback,
+ mock_sleep,
+ mock_outside_chroot,
+ ):
+
+ mock_isfile.return_value = False
+ mock_llvm_bisection.side_effect = ValueError(
+ "Failed to launch more tryjobs."
+ )
+
+        # Verify the exception is raised when the number of attempts to
+        # launch more tryjobs is exceeded, making it impossible to continue
+        # the bisection.
+ with self.assertRaises(SystemExit) as err:
+ auto_llvm_bisection.main()
+
+ self.assertEqual(err.exception.code, "Unable to continue bisection.")
+
+ mock_outside_chroot.assert_called_once()
+ mock_get_args.assert_called_once()
+ self.assertEqual(mock_isfile.call_count, 2)
+ self.assertEqual(mock_llvm_bisection.call_count, 3)
+ self.assertEqual(mock_traceback.call_count, 3)
+ self.assertEqual(mock_sleep.call_count, 2)
+
+ @mock.patch.object(chroot, "VerifyOutsideChroot", return_value=True)
+ @mock.patch.object(
+ llvm_bisection,
+ "GetCommandLineArgs",
+ return_value=test_helpers.ArgsOutputTest(),
+ )
+ @mock.patch.object(time, "time")
+ @mock.patch.object(time, "sleep")
+ @mock.patch.object(os.path, "isfile")
+ @mock.patch.object(auto_llvm_bisection, "open")
+ @mock.patch.object(json, "load")
+ @mock.patch.object(auto_llvm_bisection, "GetBuildResult")
+ def testFailedToUpdatePendingTryJobs(
+ self,
+ mock_get_build_result,
+ mock_json_load,
+ # pylint: disable=unused-argument
+ mock_open,
+ mock_isfile,
+ mock_sleep,
+ mock_time,
+ mock_get_args,
+ mock_outside_chroot,
+ ):
+
+ # Simulate behavior of `time.time()` for time passed.
+ @test_helpers.CallCountsToMockFunctions
+ def MockTimePassed(call_count):
+ if call_count < 3:
+ return call_count
+
+ assert False, "Called `time.time()` more than expected."
+
+ mock_isfile.return_value = True
+ mock_json_load.return_value = {
+ "start": 369410,
+ "end": 369420,
+ "jobs": [
+ {
+ "buildbucket_id": 12345,
+ "rev": 369411,
+ "status": update_tryjob_status.TryjobStatus.PENDING.value,
+ }
+ ],
+ }
+ mock_get_build_result.return_value = None
+ mock_time.side_effect = MockTimePassed
+ # Reduce the polling limit for the test case to terminate faster.
+ auto_llvm_bisection.POLLING_LIMIT_SECS = 1
+
+ # Verify the exception is raised when unable to update tryjobs whose
+ # 'status' value is 'pending'.
+ with self.assertRaises(SystemExit) as err:
+ auto_llvm_bisection.main()
+
+ self.assertEqual(
+ err.exception.code, "Failed to update pending tryjobs."
+ )
+
+ mock_outside_chroot.assert_called_once()
+ mock_get_args.assert_called_once()
+ self.assertEqual(mock_isfile.call_count, 2)
+ mock_sleep.assert_called_once()
+ self.assertEqual(mock_time.call_count, 3)
+
+ @mock.patch.object(subprocess, "check_output")
+ def testGetBuildResult(self, mock_chroot_command):
+ buildbucket_id = 192
+ status = auto_llvm_bisection.BuilderStatus.PASS.value
+ tryjob_contents = {buildbucket_id: {"status": status}}
+ mock_chroot_command.return_value = json.dumps(tryjob_contents)
+ chroot_path = "/some/path/to/chroot"
+
+ self.assertEqual(
+ auto_llvm_bisection.GetBuildResult(chroot_path, buildbucket_id),
+ update_tryjob_status.TryjobStatus.GOOD.value,
+ )
+
+ mock_chroot_command.assert_called_once_with(
+ [
+ "cros_sdk",
+ "--",
+ "cros",
+ "buildresult",
+ "--buildbucket-id",
+ str(buildbucket_id),
+ "--report",
+ "json",
+ ],
+ cwd="/some/path/to/chroot",
+ stderr=subprocess.STDOUT,
+ encoding="UTF-8",
+ )
+
+ @mock.patch.object(subprocess, "check_output")
+ def testGetBuildResultPassedWithUnstartedTryjob(self, mock_chroot_command):
+ buildbucket_id = 192
+ chroot_path = "/some/path/to/chroot"
+ mock_chroot_command.side_effect = subprocess.CalledProcessError(
+ returncode=1, cmd=[], output="No build found. Perhaps not started"
+ )
+ auto_llvm_bisection.GetBuildResult(chroot_path, buildbucket_id)
+ mock_chroot_command.assert_called_once_with(
+ [
+ "cros_sdk",
+ "--",
+ "cros",
+ "buildresult",
+ "--buildbucket-id",
+ "192",
+ "--report",
+ "json",
+ ],
+ cwd=chroot_path,
+ stderr=subprocess.STDOUT,
+ encoding="UTF-8",
+ )
+
+ @mock.patch.object(subprocess, "check_output")
+    def testGetBuildResultFailedWithInvalidBuildStatus(
+ self, mock_chroot_command
+ ):
+ chroot_path = "/some/path/to/chroot"
+ buildbucket_id = 50
+ invalid_build_status = "querying"
+ tryjob_contents = {buildbucket_id: {"status": invalid_build_status}}
+ mock_chroot_command.return_value = json.dumps(tryjob_contents)
+
+ # Verify the exception is raised when the return value of `cros buildresult`
+ # is not in the `builder_status_mapping`.
+ with self.assertRaises(ValueError) as err:
+ auto_llvm_bisection.GetBuildResult(chroot_path, buildbucket_id)
+
+ self.assertEqual(
+ str(err.exception),
+ '"cros buildresult" return value is invalid: %s'
+ % invalid_build_status,
+ )
+
+ mock_chroot_command.assert_called_once_with(
+ [
+ "cros_sdk",
+ "--",
+ "cros",
+ "buildresult",
+ "--buildbucket-id",
+ str(buildbucket_id),
+ "--report",
+ "json",
+ ],
+ cwd=chroot_path,
+ stderr=subprocess.STDOUT,
+ encoding="UTF-8",
+ )
+
+
+if __name__ == "__main__":
+ unittest.main()
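
The translation these tests pin down is small enough to show directly. A quick sketch, assuming `auto_llvm_bisection` and `update_tryjob_status` are importable from this directory:

```python
import auto_llvm_bisection
import update_tryjob_status

# `cros buildresult` reports pass/fail/running; GetBuildResult translates
# those strings into the tryjob statuses stored in the bisection JSON.
mapping = auto_llvm_bisection.builder_status_mapping
assert mapping["pass"] == update_tryjob_status.TryjobStatus.GOOD.value
assert mapping["fail"] == update_tryjob_status.TryjobStatus.BAD.value
assert mapping["running"] == update_tryjob_status.TryjobStatus.PENDING.value
```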
diff --git a/llvm_tools/bisect_clang_crashes.py b/llvm_tools/bisect_clang_crashes.py
index c53db179..b2759051 100755
--- a/llvm_tools/bisect_clang_crashes.py
+++ b/llvm_tools/bisect_clang_crashes.py
@@ -1,10 +1,10 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
-# Copyright 2020 The Chromium OS Authors. All rights reserved.
+# Copyright 2020 The ChromiumOS Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
-"""Fetches and submits the artifacts from Chrome OS toolchain's crash bucket.
+"""Fetches and submits the artifacts from ChromeOS toolchain's crash bucket.
"""
import argparse
@@ -21,117 +21,137 @@ import chroot
def get_artifacts(pattern):
- results = subprocess.check_output(['gsutil.py', 'ls', pattern],
- stderr=subprocess.STDOUT,
- encoding='utf-8')
- return sorted(l.strip() for l in results.splitlines())
+ results = subprocess.check_output(
+ ["gsutil.py", "ls", pattern], stderr=subprocess.STDOUT, encoding="utf-8"
+ )
+ return sorted(l.strip() for l in results.splitlines())
def get_crash_reproducers(working_dir):
- results = []
- for src in [
- f for f in glob.glob('%s/*.c*' % working_dir)
- if f.split('.')[-1] in ['c', 'cc', 'cpp']
- ]:
- script = '.'.join(src.split('.')[:-1]) + '.sh'
- if not os.path.exists(script):
- logging.warning('could not find the matching script of %s', src)
- else:
- results.append((src, script))
- return results
-
-
-def submit_crash_to_forcey(forcey: str, temporary_directory: str,
- buildbucket_id: str, url: str) -> None:
- dest_dir = os.path.join(temporary_directory, buildbucket_id)
- dest_file = os.path.join(dest_dir, os.path.basename(url))
- logging.info('Downloading and submitting %r...', url)
- subprocess.check_output(['gsutil.py', 'cp', url, dest_file],
- stderr=subprocess.STDOUT)
- subprocess.check_output(['tar', '-xJf', dest_file], cwd=dest_dir)
- for src, script in get_crash_reproducers(dest_dir):
- subprocess.check_output([
- forcey, 'reduce', '-wait=false', '-note',
- '%s:%s' % (url, src), '-sh_file', script, '-src_file', src
- ])
+ results = []
+ for src in [
+ f
+ for f in glob.glob("%s/*.c*" % working_dir)
+ if f.split(".")[-1] in ["c", "cc", "cpp"]
+ ]:
+ script = ".".join(src.split(".")[:-1]) + ".sh"
+ if not os.path.exists(script):
+ logging.warning("could not find the matching script of %s", src)
+ else:
+ results.append((src, script))
+ return results
+
+
+def submit_crash_to_forcey(
+ forcey: str, temporary_directory: str, buildbucket_id: str, url: str
+) -> None:
+ dest_dir = os.path.join(temporary_directory, buildbucket_id)
+ dest_file = os.path.join(dest_dir, os.path.basename(url))
+ logging.info("Downloading and submitting %r...", url)
+ subprocess.check_output(
+ ["gsutil.py", "cp", url, dest_file], stderr=subprocess.STDOUT
+ )
+ subprocess.check_output(["tar", "-xJf", dest_file], cwd=dest_dir)
+ for src, script in get_crash_reproducers(dest_dir):
+ subprocess.check_output(
+ [
+ forcey,
+ "reduce",
+ "-wait=false",
+ "-note",
+ "%s:%s" % (url, src),
+ "-sh_file",
+ script,
+ "-src_file",
+ src,
+ ]
+ )
def main(argv):
- chroot.VerifyOutsideChroot()
- logging.basicConfig(
- format='%(asctime)s: %(levelname)s: %(filename)s:%(lineno)d: %(message)s',
- level=logging.INFO,
- )
- cur_dir = os.path.dirname(os.path.abspath(__file__))
- parser = argparse.ArgumentParser(description=__doc__)
- parser.add_argument(
- '--4c', dest='forcey', required=True, help='Path to a 4c client binary')
- parser.add_argument(
- '--state_file',
- default=os.path.join(cur_dir, 'chromeos-state.json'),
- help='The path to the state file.')
- parser.add_argument(
- '--nocleanup',
- action='store_false',
- dest='cleanup',
- help='Keep temporary files created after the script finishes.')
- opts = parser.parse_args(argv)
-
- state_file = os.path.abspath(opts.state_file)
- os.makedirs(os.path.dirname(state_file), exist_ok=True)
- temporary_directory = '/tmp/bisect_clang_crashes'
- os.makedirs(temporary_directory, exist_ok=True)
- urls = get_artifacts('gs://chromeos-toolchain-artifacts/clang-crash-diagnoses'
- '/**/*clang_crash_diagnoses.tar.xz')
- logging.info('%d crash URLs found', len(urls))
-
- visited = {}
- if os.path.exists(state_file):
- buildbucket_ids = {url.split('/')[-2] for url in urls}
- with open(state_file, encoding='utf-8') as f:
- data = json.load(f)
- visited = {k: v for k, v in data.items() if k in buildbucket_ids}
- logging.info('Successfully loaded %d previously-submitted crashes',
- len(visited))
-
- try:
- for url in urls:
- splits = url.split('/')
- buildbucket_id = splits[-2]
- # Skip the builds that has been processed
- if buildbucket_id in visited:
- continue
- submit_crash_to_forcey(
- forcey=opts.forcey,
- temporary_directory=temporary_directory,
- buildbucket_id=buildbucket_id,
- url=url,
- )
- visited[buildbucket_id] = url
-
- exception_in_flight = False
- except:
- exception_in_flight = True
- raise
- finally:
- if exception_in_flight:
- # This is best-effort. If the machine powers off or similar, we'll just
- # resubmit the same crashes, which is suboptimal, but otherwise
- # acceptable.
- logging.error('Something went wrong; attempting to save our work...')
- else:
- logging.info('Persisting state...')
-
- tmp_state_file = state_file + '.tmp'
- with open(tmp_state_file, 'w', encoding='utf-8') as f:
- json.dump(visited, f, indent=2)
- os.rename(tmp_state_file, state_file)
-
- logging.info('State successfully persisted')
-
- if opts.cleanup:
- shutil.rmtree(temporary_directory)
-
-
-if __name__ == '__main__':
- sys.exit(main(sys.argv[1:]))
+ chroot.VerifyOutsideChroot()
+ logging.basicConfig(
+ format="%(asctime)s: %(levelname)s: %(filename)s:%(lineno)d: %(message)s",
+ level=logging.INFO,
+ )
+ cur_dir = os.path.dirname(os.path.abspath(__file__))
+ parser = argparse.ArgumentParser(description=__doc__)
+ parser.add_argument(
+ "--4c", dest="forcey", required=True, help="Path to a 4c client binary"
+ )
+ parser.add_argument(
+ "--state_file",
+ default=os.path.join(cur_dir, "chromeos-state.json"),
+ help="The path to the state file.",
+ )
+ parser.add_argument(
+ "--nocleanup",
+ action="store_false",
+ dest="cleanup",
+ help="Keep temporary files created after the script finishes.",
+ )
+ opts = parser.parse_args(argv)
+
+ state_file = os.path.abspath(opts.state_file)
+ os.makedirs(os.path.dirname(state_file), exist_ok=True)
+ temporary_directory = "/tmp/bisect_clang_crashes"
+ os.makedirs(temporary_directory, exist_ok=True)
+ urls = get_artifacts(
+ "gs://chromeos-toolchain-artifacts/clang-crash-diagnoses"
+ "/**/*clang_crash_diagnoses.tar.xz"
+ )
+ logging.info("%d crash URLs found", len(urls))
+
+ visited = {}
+ if os.path.exists(state_file):
+ buildbucket_ids = {url.split("/")[-2] for url in urls}
+ with open(state_file, encoding="utf-8") as f:
+ data = json.load(f)
+ visited = {k: v for k, v in data.items() if k in buildbucket_ids}
+ logging.info(
+ "Successfully loaded %d previously-submitted crashes", len(visited)
+ )
+
+ try:
+ for url in urls:
+ splits = url.split("/")
+ buildbucket_id = splits[-2]
+            # Skip builds that have already been processed.
+ if buildbucket_id in visited:
+ continue
+ submit_crash_to_forcey(
+ forcey=opts.forcey,
+ temporary_directory=temporary_directory,
+ buildbucket_id=buildbucket_id,
+ url=url,
+ )
+ visited[buildbucket_id] = url
+
+ exception_in_flight = False
+ except:
+ exception_in_flight = True
+ raise
+ finally:
+ if exception_in_flight:
+ # This is best-effort. If the machine powers off or similar, we'll just
+ # resubmit the same crashes, which is suboptimal, but otherwise
+ # acceptable.
+ logging.error(
+ "Something went wrong; attempting to save our work..."
+ )
+ else:
+ logging.info("Persisting state...")
+
+ tmp_state_file = state_file + ".tmp"
+ with open(tmp_state_file, "w", encoding="utf-8") as f:
+ json.dump(visited, f, indent=2)
+ os.rename(tmp_state_file, state_file)
+
+ logging.info("State successfully persisted")
+
+ if opts.cleanup:
+ shutil.rmtree(temporary_directory)
+
+
+if __name__ == "__main__":
+ sys.exit(main(sys.argv[1:]))
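
For reference, the state file this script persists (`chromeos-state.json` by default) is a flat JSON map from each processed build's buildbucket ID to the artifact URL that was submitted; the ID is recovered as `url.split("/")[-2]`. A sketch with fabricated values:

```python
# Illustrative contents of the state file written by main(); the ID and
# URL below are made up, and the exact bucket layout may differ.
example_visited = {
    "12345": (
        "gs://chromeos-toolchain-artifacts/clang-crash-diagnoses/"
        "some-builder/12345/clang_crash_diagnoses.tar.xz"
    ),
}
```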
diff --git a/llvm_tools/bisect_clang_crashes_unittest.py b/llvm_tools/bisect_clang_crashes_unittest.py
index a3dc0c6d..22c9be19 100755
--- a/llvm_tools/bisect_clang_crashes_unittest.py
+++ b/llvm_tools/bisect_clang_crashes_unittest.py
@@ -1,6 +1,6 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
-# Copyright 2020 The Chromium OS Authors. All rights reserved.
+# Copyright 2020 The ChromiumOS Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
@@ -17,75 +17,85 @@ import bisect_clang_crashes
class Test(unittest.TestCase):
- """Tests for bisect_clang_crashes."""
-
- class _SilencingFilter(object):
- """Silences all log messages.
-
- Also collects info about log messages that would've been emitted.
- """
-
- def __init__(self):
- self.messages = []
-
- def filter(self, record):
- self.messages.append(record.getMessage())
- return 0
-
- @mock.patch.object(subprocess, 'check_output')
- def test_get_artifacts(self, mock_gsutil_ls):
- pattern = 'gs://chromeos-toolchain-artifacts/clang-crash-diagnoses/' \
- '**/*clang_crash_diagnoses.tar.xz'
- mock_gsutil_ls.return_value = 'artifact1\nartifact2\nartifact3'
- results = bisect_clang_crashes.get_artifacts(pattern)
- self.assertEqual(results, ['artifact1', 'artifact2', 'artifact3'])
- mock_gsutil_ls.assert_called_once_with(['gsutil.py', 'ls', pattern],
- stderr=subprocess.STDOUT,
- encoding='utf-8')
-
- @mock.patch.object(os.path, 'exists')
- @mock.patch.object(glob, 'glob')
- def test_get_crash_reproducers_succeed(self, mock_file_search,
- mock_file_check):
- working_dir = 'SomeDirectory'
- mock_file_search.return_value = ['a.c', 'b.cpp', 'c.cc']
- mock_file_check.side_effect = [True, True, True]
- results = bisect_clang_crashes.get_crash_reproducers(working_dir)
- mock_file_search.assert_called_once_with('%s/*.c*' % working_dir)
- self.assertEqual(mock_file_check.call_count, 3)
- self.assertEqual(mock_file_check.call_args_list[0], mock.call('a.sh'))
- self.assertEqual(mock_file_check.call_args_list[1], mock.call('b.sh'))
- self.assertEqual(mock_file_check.call_args_list[2], mock.call('c.sh'))
- self.assertEqual(results, [('a.c', 'a.sh'), ('b.cpp', 'b.sh'),
- ('c.cc', 'c.sh')])
-
- @mock.patch.object(os.path, 'exists')
- @mock.patch.object(glob, 'glob')
- def test_get_crash_reproducers_no_matching_script(self, mock_file_search,
- mock_file_check):
-
- def silence_logging():
- root = logging.getLogger()
- filt = self._SilencingFilter()
- root.addFilter(filt)
- self.addCleanup(root.removeFilter, filt)
- return filt
-
- log_filter = silence_logging()
- working_dir = 'SomeDirectory'
- mock_file_search.return_value = ['a.c', 'b.cpp', 'c.cc']
- mock_file_check.side_effect = [True, False, True]
- results = bisect_clang_crashes.get_crash_reproducers(working_dir)
- mock_file_search.assert_called_once_with('%s/*.c*' % working_dir)
- self.assertEqual(mock_file_check.call_count, 3)
- self.assertEqual(mock_file_check.call_args_list[0], mock.call('a.sh'))
- self.assertEqual(mock_file_check.call_args_list[1], mock.call('b.sh'))
- self.assertEqual(mock_file_check.call_args_list[2], mock.call('c.sh'))
- self.assertEqual(results, [('a.c', 'a.sh'), ('c.cc', 'c.sh')])
- self.assertTrue(
- any('could not find the matching script of b.cpp' in x
- for x in log_filter.messages), log_filter.messages)
-
-
-if __name__ == '__main__':
- unittest.main()
+ """Tests for bisect_clang_crashes."""
+
+ class _SilencingFilter(object):
+ """Silences all log messages.
+
+ Also collects info about log messages that would've been emitted.
+ """
+
+ def __init__(self):
+ self.messages = []
+
+ def filter(self, record):
+ self.messages.append(record.getMessage())
+ return 0
+
+ @mock.patch.object(subprocess, "check_output")
+ def test_get_artifacts(self, mock_gsutil_ls):
+ pattern = (
+ "gs://chromeos-toolchain-artifacts/clang-crash-diagnoses/"
+ "**/*clang_crash_diagnoses.tar.xz"
+ )
+ mock_gsutil_ls.return_value = "artifact1\nartifact2\nartifact3"
+ results = bisect_clang_crashes.get_artifacts(pattern)
+ self.assertEqual(results, ["artifact1", "artifact2", "artifact3"])
+ mock_gsutil_ls.assert_called_once_with(
+ ["gsutil.py", "ls", pattern],
+ stderr=subprocess.STDOUT,
+ encoding="utf-8",
+ )
+
+ @mock.patch.object(os.path, "exists")
+ @mock.patch.object(glob, "glob")
+ def test_get_crash_reproducers_succeed(
+ self, mock_file_search, mock_file_check
+ ):
+ working_dir = "SomeDirectory"
+ mock_file_search.return_value = ["a.c", "b.cpp", "c.cc"]
+ mock_file_check.side_effect = [True, True, True]
+ results = bisect_clang_crashes.get_crash_reproducers(working_dir)
+ mock_file_search.assert_called_once_with("%s/*.c*" % working_dir)
+ self.assertEqual(mock_file_check.call_count, 3)
+ self.assertEqual(mock_file_check.call_args_list[0], mock.call("a.sh"))
+ self.assertEqual(mock_file_check.call_args_list[1], mock.call("b.sh"))
+ self.assertEqual(mock_file_check.call_args_list[2], mock.call("c.sh"))
+ self.assertEqual(
+ results, [("a.c", "a.sh"), ("b.cpp", "b.sh"), ("c.cc", "c.sh")]
+ )
+
+ @mock.patch.object(os.path, "exists")
+ @mock.patch.object(glob, "glob")
+ def test_get_crash_reproducers_no_matching_script(
+ self, mock_file_search, mock_file_check
+ ):
+ def silence_logging():
+ root = logging.getLogger()
+ filt = self._SilencingFilter()
+ root.addFilter(filt)
+ self.addCleanup(root.removeFilter, filt)
+ return filt
+
+ log_filter = silence_logging()
+ working_dir = "SomeDirectory"
+ mock_file_search.return_value = ["a.c", "b.cpp", "c.cc"]
+ mock_file_check.side_effect = [True, False, True]
+ results = bisect_clang_crashes.get_crash_reproducers(working_dir)
+ mock_file_search.assert_called_once_with("%s/*.c*" % working_dir)
+ self.assertEqual(mock_file_check.call_count, 3)
+ self.assertEqual(mock_file_check.call_args_list[0], mock.call("a.sh"))
+ self.assertEqual(mock_file_check.call_args_list[1], mock.call("b.sh"))
+ self.assertEqual(mock_file_check.call_args_list[2], mock.call("c.sh"))
+ self.assertEqual(results, [("a.c", "a.sh"), ("c.cc", "c.sh")])
+ self.assertTrue(
+ any(
+ "could not find the matching script of b.cpp" in x
+ for x in log_filter.messages
+ ),
+ log_filter.messages,
+ )
+
+
+if __name__ == "__main__":
+ unittest.main()
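
The pairing behavior mocked out above can also be exercised end to end with real files. A small runnable sketch (file names are arbitrary, and it assumes `bisect_clang_crashes` is importable from this directory):

```python
import os
import tempfile

import bisect_clang_crashes

with tempfile.TemporaryDirectory() as work:
    for name in ("a.c", "a.sh", "b.cpp"):  # b.cpp lacks a matching b.sh
        open(os.path.join(work, name), "w").close()
    # Only sources with a same-stem .sh script are paired; b.cpp is
    # skipped with a logged warning.
    assert bisect_clang_crashes.get_crash_reproducers(work) == [
        (os.path.join(work, "a.c"), os.path.join(work, "a.sh"))
    ]
```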
diff --git a/llvm_tools/check_clang_diags.py b/llvm_tools/check_clang_diags.py
new file mode 100755
index 00000000..7beb958f
--- /dev/null
+++ b/llvm_tools/check_clang_diags.py
@@ -0,0 +1,223 @@
+#!/usr/bin/env python3
+# Copyright 2022 The ChromiumOS Authors
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""check_clang_diags monitors for new diagnostics in LLVM
+
+This looks at projects we care about (currently only clang-tidy, though
+hopefully clang in the future, too?) and files bugs whenever a new check or
+warning appears. These bugs are intended to keep us up-to-date with new
+diagnostics, so we can enable them as they land.
+"""
+
+import argparse
+import json
+import logging
+import os
+import shutil
+import subprocess
+import sys
+from typing import Dict, List, Tuple
+
+from cros_utils import bugs
+
+
+_DEFAULT_ASSIGNEE = "mage"
+_DEFAULT_CCS = ["cjdb@google.com"]
+
+
+# FIXME: clang would be cool to check, too? Doesn't seem to have a super stable
+# way of listing all warnings, unfortunately.
+def _build_llvm(llvm_dir: str, build_dir: str):
+ """Builds everything that _collect_available_diagnostics depends on."""
+ targets = ["clang-tidy"]
+    # Use `-C $build_dir` so the failure is easier to handle if build_dir doesn't exist.
+ ninja_result = subprocess.run(
+ ["ninja", "-C", build_dir] + targets,
+ check=False,
+ )
+ if not ninja_result.returncode:
+ return
+
+ # Sometimes the directory doesn't exist, sometimes incremental cmake
+ # breaks, sometimes something random happens. Start fresh since that fixes
+ # the issue most of the time.
+ logging.warning("Initial build failed; trying to build from scratch.")
+ shutil.rmtree(build_dir, ignore_errors=True)
+ os.makedirs(build_dir)
+ subprocess.run(
+ [
+ "cmake",
+ "-G",
+ "Ninja",
+ "-DCMAKE_BUILD_TYPE=MinSizeRel",
+ "-DLLVM_USE_LINKER=lld",
+ "-DLLVM_ENABLE_PROJECTS=clang;clang-tools-extra",
+ "-DLLVM_TARGETS_TO_BUILD=X86",
+ f"{os.path.abspath(llvm_dir)}/llvm",
+ ],
+ cwd=build_dir,
+ check=True,
+ )
+ subprocess.run(["ninja"] + targets, check=True, cwd=build_dir)
+
+
+def _collect_available_diagnostics(
+ llvm_dir: str, build_dir: str
+) -> Dict[str, List[str]]:
+ _build_llvm(llvm_dir, build_dir)
+
+ clang_tidy = os.path.join(os.path.abspath(build_dir), "bin", "clang-tidy")
+ clang_tidy_checks = subprocess.run(
+ [clang_tidy, "-checks=*", "-list-checks"],
+ # Use cwd='/' to ensure no .clang-tidy files are picked up. It
+ # _shouldn't_ matter, but it's also ~free, so...
+ check=True,
+ cwd="/",
+ stdout=subprocess.PIPE,
+ encoding="utf-8",
+ )
+ clang_tidy_checks_stdout = [
+ x.strip() for x in clang_tidy_checks.stdout.strip().splitlines()
+ ]
+
+ # The first line should always be this, then each line thereafter is a check
+ # name.
+ assert (
+ clang_tidy_checks_stdout[0] == "Enabled checks:"
+ ), clang_tidy_checks_stdout
+ clang_tidy_checks = clang_tidy_checks_stdout[1:]
+ assert not any(
+ check.isspace() for check in clang_tidy_checks
+ ), clang_tidy_checks
+ return {"clang-tidy": clang_tidy_checks}
+
+
+def _process_new_diagnostics(
+ old: Dict[str, List[str]], new: Dict[str, List[str]]
+) -> Tuple[Dict[str, List[str]], Dict[str, List[str]]]:
+ """Determines the set of new diagnostics that we should file bugs for.
+
+ old: The previous state that this function returned as `new_state_file`, or
+ `{}`
+ new: The diagnostics that we've most recently found. This is a dict in the
+ form {tool: [diag]}
+
+ Returns a `new_state_file` to pass into this function as `old` in the
+ future, and a dict of diags to file bugs about.
+ """
+ new_diagnostics = {}
+ new_state_file = {}
+ for tool, diags in new.items():
+ if tool not in old:
+ logging.info(
+ "New tool with diagnostics: %s; pretending none are new", tool
+ )
+ new_state_file[tool] = diags
+ else:
+ old_diags = set(old[tool])
+ newly_added_diags = [x for x in diags if x not in old_diags]
+ if newly_added_diags:
+ new_diagnostics[tool] = newly_added_diags
+ # This specifically tries to make diags sticky: if one is landed, then
+ # reverted, then relanded, we ignore the reland. This might not be
+ # desirable? I don't know.
+ new_state_file[tool] = old[tool] + newly_added_diags
+
+ # Sort things so we have more predictable output.
+ for v in new_diagnostics.values():
+ v.sort()
+
+ return new_state_file, new_diagnostics
+
+
+def _file_bugs_for_new_diags(new_diags: Dict[str, List[str]]):
+ for tool, diags in sorted(new_diags.items()):
+ for diag in diags:
+ bugs.CreateNewBug(
+ component_id=bugs.WellKnownComponents.CrOSToolchainPublic,
+ title=f"Investigate {tool} check `{diag}`",
+ body="\n".join(
+ (
+ f"It seems that the `{diag}` check was recently added to {tool}.",
+ "It's probably good to TAL at whether this check would be good",
+ "for us to enable in e.g., platform2, or across ChromeOS.",
+ )
+ ),
+ assignee=_DEFAULT_ASSIGNEE,
+ cc=_DEFAULT_CCS,
+ )
+
+
+def main(argv: List[str]):
+ logging.basicConfig(
+ format=">> %(asctime)s: %(levelname)s: %(filename)s:%(lineno)d: "
+ "%(message)s",
+ level=logging.INFO,
+ )
+
+ parser = argparse.ArgumentParser(
+ description=__doc__,
+ formatter_class=argparse.RawDescriptionHelpFormatter,
+ )
+ parser.add_argument(
+ "--llvm_dir", required=True, help="LLVM directory to check. Required."
+ )
+ parser.add_argument(
+ "--llvm_build_dir",
+ required=True,
+ help="Build directory for LLVM. Required & autocreated.",
+ )
+ parser.add_argument(
+ "--state_file",
+ required=True,
+ help="State file to use to suppress duplicate complaints. Required.",
+ )
+ parser.add_argument(
+ "--dry_run",
+ action="store_true",
+ help="Skip filing bugs & writing to the state file; just log "
+ "differences.",
+ )
+ opts = parser.parse_args(argv)
+
+ build_dir = opts.llvm_build_dir
+ dry_run = opts.dry_run
+ llvm_dir = opts.llvm_dir
+ state_file = opts.state_file
+
+ try:
+ with open(state_file, encoding="utf-8") as f:
+ prior_diagnostics = json.load(f)
+ except FileNotFoundError:
+ # If the state file didn't exist, just create it without complaining this
+ # time.
+ prior_diagnostics = {}
+
+ available_diagnostics = _collect_available_diagnostics(llvm_dir, build_dir)
+ logging.info("Available diagnostics are %s", available_diagnostics)
+ if available_diagnostics == prior_diagnostics:
+ logging.info("Current diagnostics are identical to previous ones; quit")
+ return
+
+ new_state_file, new_diagnostics = _process_new_diagnostics(
+ prior_diagnostics, available_diagnostics
+ )
+ logging.info("New diagnostics in existing tool(s): %s", new_diagnostics)
+
+ if dry_run:
+ logging.info(
+ "Skipping new state file writing and bug filing; dry-run "
+ "mode wins"
+ )
+ else:
+ _file_bugs_for_new_diags(new_diagnostics)
+ new_state_file_path = state_file + ".new"
+ with open(new_state_file_path, "w", encoding="utf-8") as f:
+ json.dump(new_state_file, f)
+ os.rename(new_state_file_path, state_file)
+
+
+if __name__ == "__main__":
+ main(sys.argv[1:])
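
A compact usage sketch of `_process_new_diagnostics` (the check names are invented; the unit tests in the next file exercise the same contract):

```python
import check_clang_diags

# old: the previously persisted state file; new: what the freshly built
# clang-tidy reports now. "bugprone-bar" is a made-up check name.
old = {"clang-tidy": ["bugprone-foo"]}
new = {"clang-tidy": ["bugprone-foo", "bugprone-bar"]}
new_state, new_diags = check_clang_diags._process_new_diagnostics(old, new)
assert new_state == {"clang-tidy": ["bugprone-foo", "bugprone-bar"]}
assert new_diags == {"clang-tidy": ["bugprone-bar"]}  # a bug gets filed
```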
diff --git a/llvm_tools/check_clang_diags_test.py b/llvm_tools/check_clang_diags_test.py
new file mode 100755
index 00000000..a7889038
--- /dev/null
+++ b/llvm_tools/check_clang_diags_test.py
@@ -0,0 +1,110 @@
+#!/usr/bin/env python3
+# Copyright 2022 The ChromiumOS Authors
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Tests for check_clang_diags."""
+
+import unittest
+from unittest import mock
+
+import check_clang_diags
+from cros_utils import bugs
+
+
+# pylint: disable=protected-access
+
+
+class Test(unittest.TestCase):
+ """Test class."""
+
+ def test_process_new_diagnostics_ignores_new_tools(self):
+ new_state, new_diags = check_clang_diags._process_new_diagnostics(
+ old={},
+ new={"clang": ["-Wone", "-Wtwo"]},
+ )
+ self.assertEqual(new_state, {"clang": ["-Wone", "-Wtwo"]})
+ self.assertEqual(new_diags, {})
+
+ def test_process_new_diagnostics_is_a_nop_when_no_changes(self):
+ new_state, new_diags = check_clang_diags._process_new_diagnostics(
+ old={"clang": ["-Wone", "-Wtwo"]},
+ new={"clang": ["-Wone", "-Wtwo"]},
+ )
+ self.assertEqual(new_state, {"clang": ["-Wone", "-Wtwo"]})
+ self.assertEqual(new_diags, {})
+
+ def test_process_new_diagnostics_ignores_removals_and_readds(self):
+ new_state, new_diags = check_clang_diags._process_new_diagnostics(
+ old={"clang": ["-Wone", "-Wtwo"]},
+ new={"clang": ["-Wone"]},
+ )
+ self.assertEqual(new_diags, {})
+ new_state, new_diags = check_clang_diags._process_new_diagnostics(
+ old=new_state,
+ new={"clang": ["-Wone", "-Wtwo"]},
+ )
+ self.assertEqual(new_state, {"clang": ["-Wone", "-Wtwo"]})
+ self.assertEqual(new_diags, {})
+
+ def test_process_new_diagnostics_complains_when_warnings_are_added(self):
+ new_state, new_diags = check_clang_diags._process_new_diagnostics(
+ old={"clang": ["-Wone"]},
+ new={"clang": ["-Wone", "-Wtwo"]},
+ )
+ self.assertEqual(new_state, {"clang": ["-Wone", "-Wtwo"]})
+ self.assertEqual(new_diags, {"clang": ["-Wtwo"]})
+
+ @mock.patch.object(bugs, "CreateNewBug")
+ def test_bugs_are_created_as_expected(self, create_new_bug_mock):
+ check_clang_diags._file_bugs_for_new_diags(
+ {
+ "clang": ["-Wone"],
+ "clang-tidy": ["bugprone-foo"],
+ }
+ )
+
+ expected_calls = [
+ mock.call(
+ component_id=bugs.WellKnownComponents.CrOSToolchainPublic,
+ title="Investigate clang check `-Wone`",
+ body="\n".join(
+ (
+ "It seems that the `-Wone` check was recently added to clang.",
+ "It's probably good to TAL at whether this check would be good",
+ "for us to enable in e.g., platform2, or across ChromeOS.",
+ )
+ ),
+ assignee=check_clang_diags._DEFAULT_ASSIGNEE,
+ cc=check_clang_diags._DEFAULT_CCS,
+ ),
+ mock.call(
+ component_id=bugs.WellKnownComponents.CrOSToolchainPublic,
+ title="Investigate clang-tidy check `bugprone-foo`",
+ body="\n".join(
+ (
+ "It seems that the `bugprone-foo` check was recently added to "
+ "clang-tidy.",
+ "It's probably good to TAL at whether this check would be good",
+ "for us to enable in e.g., platform2, or across ChromeOS.",
+ )
+ ),
+ assignee=check_clang_diags._DEFAULT_ASSIGNEE,
+ cc=check_clang_diags._DEFAULT_CCS,
+ ),
+ ]
+
+ # Don't assertEqual the lists, since the diff is really hard to read for
+ # that.
+ for actual, expected in zip(
+ create_new_bug_mock.call_args_list, expected_calls
+ ):
+ self.assertEqual(actual, expected)
+
+ self.assertEqual(
+ len(create_new_bug_mock.call_args_list), len(expected_calls)
+ )
+
+
+if __name__ == "__main__":
+ unittest.main()
diff --git a/llvm_tools/chroot.py b/llvm_tools/chroot.py
index b10ddbac..46464feb 100755
--- a/llvm_tools/chroot.py
+++ b/llvm_tools/chroot.py
@@ -1,96 +1,99 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
-# Copyright 2020 The Chromium OS Authors. All rights reserved.
+# Copyright 2020 The ChromiumOS Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Chroot helper functions."""
-from __future__ import print_function
+import collections
import os
import subprocess
-import collections
-CommitContents = collections.namedtuple('CommitContents', ['url', 'cl_number'])
+
+CommitContents = collections.namedtuple("CommitContents", ["url", "cl_number"])
def InChroot():
- """Returns True if currently in the chroot."""
- return 'CROS_WORKON_SRCROOT' in os.environ
+ """Returns True if currently in the chroot."""
+ return "CROS_WORKON_SRCROOT" in os.environ
def VerifyOutsideChroot():
- """Checks whether the script invoked was executed in the chroot.
+ """Checks whether the script invoked was executed in the chroot.
- Raises:
- AssertionError: The script was run inside the chroot.
- """
+ Raises:
+ AssertionError: The script was run inside the chroot.
+ """
- assert not InChroot(), 'Script should be run outside the chroot.'
+ assert not InChroot(), "Script should be run outside the chroot."
def GetChrootEbuildPaths(chromeos_root, packages):
- """Gets the chroot path(s) of the package(s).
+ """Gets the chroot path(s) of the package(s).
- Args:
- chromeos_root: The absolute path to the chroot to
- use for executing chroot commands.
- packages: A list of a package/packages to
- be used to find their chroot path.
+ Args:
+ chromeos_root: The absolute path to the chroot to
+ use for executing chroot commands.
+      packages: A list of packages whose ebuild
+        chroot paths should be looked up.
- Returns:
- A list of chroot paths of the packages' ebuild files.
+ Returns:
+ A list of chroot paths of the packages' ebuild files.
- Raises:
- ValueError: Failed to get the chroot path of a package.
- """
+ Raises:
+ ValueError: Failed to get the chroot path of a package.
+ """
- chroot_paths = []
+ chroot_paths = []
- # Find the chroot path for each package's ebuild.
- for package in packages:
- chroot_path = subprocess.check_output(
- ['cros_sdk', '--', 'equery', 'w', package],
- cwd=chromeos_root,
- encoding='utf-8')
- chroot_paths.append(chroot_path.strip())
+ # Find the chroot path for each package's ebuild.
+ for package in packages:
+ chroot_path = subprocess.check_output(
+ ["cros_sdk", "--", "equery", "w", package],
+ cwd=chromeos_root,
+ encoding="utf-8",
+ )
+ chroot_paths.append(chroot_path.strip())
- return chroot_paths
+ return chroot_paths
def ConvertChrootPathsToAbsolutePaths(chromeos_root, chroot_paths):
- """Converts the chroot path(s) to absolute symlink path(s).
+ """Converts the chroot path(s) to absolute symlink path(s).
- Args:
- chromeos_root: The absolute path to the chroot.
- chroot_paths: A list of chroot paths to convert to absolute paths.
+ Args:
+ chromeos_root: The absolute path to the chroot.
+ chroot_paths: A list of chroot paths to convert to absolute paths.
- Returns:
- A list of absolute path(s).
+ Returns:
+ A list of absolute path(s).
- Raises:
- ValueError: Invalid prefix for the chroot path or
- invalid chroot paths were provided.
- """
+ Raises:
+ ValueError: Invalid prefix for the chroot path or
+ invalid chroot paths were provided.
+ """
- abs_paths = []
+ abs_paths = []
- chroot_prefix = '/mnt/host/source/'
+ chroot_prefix = "/mnt/host/source/"
- # Iterate through the chroot paths.
- #
- # For each chroot file path, remove '/mnt/host/source/' prefix
- # and combine the chroot path with the result and add it to the list.
- for chroot_path in chroot_paths:
- if not chroot_path.startswith(chroot_prefix):
- raise ValueError('Invalid prefix for the chroot path: %s' % chroot_path)
+ # Iterate through the chroot paths.
+ #
+ # For each chroot file path, remove '/mnt/host/source/' prefix
+ # and combine the chroot path with the result and add it to the list.
+ for chroot_path in chroot_paths:
+ if not chroot_path.startswith(chroot_prefix):
+ raise ValueError(
+ "Invalid prefix for the chroot path: %s" % chroot_path
+ )
- rel_path = chroot_path[len(chroot_prefix):]
+ rel_path = chroot_path[len(chroot_prefix) :]
- # combine the chromeos root path + '/src/...'
- abs_path = os.path.join(chromeos_root, rel_path)
+ # combine the chromeos root path + '/src/...'
+ abs_path = os.path.join(chromeos_root, rel_path)
- abs_paths.append(abs_path)
+ abs_paths.append(abs_path)
- return abs_paths
+ return abs_paths
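
Taken together, the helpers above turn a package name into a host path: `GetChrootEbuildPaths` asks `equery` (via `cros_sdk`) for the `/mnt/host/source/...` ebuild path, and `ConvertChrootPathsToAbsolutePaths` rebases that onto the checkout root. A minimal sketch, assuming a hypothetical checkout at `/home/user/chromiumos` (note the first call genuinely shells out into the chroot):

    import chroot

    chromeos_root = "/home/user/chromiumos"  # hypothetical checkout root
    chroot.VerifyOutsideChroot()  # AssertionError if run inside the chroot

    # e.g. ["/mnt/host/source/src/third_party/chromiumos-overlay/..."]
    ebuilds = chroot.GetChrootEbuildPaths(chromeos_root, ["sys-devel/llvm"])

    # Strips the '/mnt/host/source/' prefix and joins onto chromeos_root.
    abs_paths = chroot.ConvertChrootPathsToAbsolutePaths(chromeos_root, ebuilds)
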
diff --git a/llvm_tools/chroot_unittest.py b/llvm_tools/chroot_unittest.py
index 5eec5675..f1a6a626 100755
--- a/llvm_tools/chroot_unittest.py
+++ b/llvm_tools/chroot_unittest.py
@@ -1,12 +1,11 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
-# Copyright 2020 The Chromium OS Authors. All rights reserved.
+# Copyright 2020 The ChromiumOS Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Unit tests for chroot helper functions."""
-from __future__ import print_function
import subprocess
import unittest
@@ -14,53 +13,61 @@ import unittest.mock as mock
import chroot
+
# These are unittests; protected access is OK to a point.
# pylint: disable=protected-access
class HelperFunctionsTest(unittest.TestCase):
- """Test class for updating LLVM hashes of packages."""
+ """Test class for updating LLVM hashes of packages."""
- @mock.patch.object(subprocess, 'check_output')
- def testSucceedsToGetChrootEbuildPathForPackage(self, mock_chroot_command):
- package_chroot_path = '/chroot/path/to/package.ebuild'
+ @mock.patch.object(subprocess, "check_output")
+ def testSucceedsToGetChrootEbuildPathForPackage(self, mock_chroot_command):
+ package_chroot_path = "/chroot/path/to/package.ebuild"
- # Emulate ChrootRunCommandWOutput behavior when a chroot path is found for
- # a valid package.
- mock_chroot_command.return_value = package_chroot_path
+ # Emulate ChrootRunCommandWOutput behavior when a chroot path is found for
+ # a valid package.
+ mock_chroot_command.return_value = package_chroot_path
- chroot_path = '/test/chroot/path'
- package_list = ['new-test/package']
+ chroot_path = "/test/chroot/path"
+ package_list = ["new-test/package"]
- self.assertEqual(
- chroot.GetChrootEbuildPaths(chroot_path, package_list),
- [package_chroot_path])
+ self.assertEqual(
+ chroot.GetChrootEbuildPaths(chroot_path, package_list),
+ [package_chroot_path],
+ )
- mock_chroot_command.assert_called_once()
+ mock_chroot_command.assert_called_once()
- def testFailedToConvertChrootPathWithInvalidPrefix(self):
- chroot_path = '/path/to/chroot'
- chroot_file_path = '/src/package.ebuild'
+ def testFailedToConvertChrootPathWithInvalidPrefix(self):
+ chroot_path = "/path/to/chroot"
+ chroot_file_path = "/src/package.ebuild"
- # Verify the exception is raised when a chroot path does not have the prefix
- # '/mnt/host/source/'.
- with self.assertRaises(ValueError) as err:
- chroot.ConvertChrootPathsToAbsolutePaths(chroot_path, [chroot_file_path])
+ # Verify the exception is raised when a chroot path does not have the prefix
+ # '/mnt/host/source/'.
+ with self.assertRaises(ValueError) as err:
+ chroot.ConvertChrootPathsToAbsolutePaths(
+ chroot_path, [chroot_file_path]
+ )
- self.assertEqual(
- str(err.exception), 'Invalid prefix for the chroot path: '
- '%s' % chroot_file_path)
+ self.assertEqual(
+ str(err.exception),
+ "Invalid prefix for the chroot path: " "%s" % chroot_file_path,
+ )
- def testSucceedsToConvertChrootPathToAbsolutePath(self):
- chroot_path = '/path/to/chroot'
- chroot_file_paths = ['/mnt/host/source/src/package.ebuild']
+ def testSucceedsToConvertChrootPathToAbsolutePath(self):
+ chroot_path = "/path/to/chroot"
+ chroot_file_paths = ["/mnt/host/source/src/package.ebuild"]
- expected_abs_path = '/path/to/chroot/src/package.ebuild'
+ expected_abs_path = "/path/to/chroot/src/package.ebuild"
- self.assertEqual(
- chroot.ConvertChrootPathsToAbsolutePaths(
- chroot_path, chroot_file_paths), [expected_abs_path])
+ self.assertEqual(
+ chroot.ConvertChrootPathsToAbsolutePaths(
+ chroot_path, chroot_file_paths
+ ),
+ [expected_abs_path],
+ )
-if __name__ == '__main__':
- unittest.main()
+if __name__ == "__main__":
+ unittest.main()
diff --git a/llvm_tools/copy_helpers_to_chromiumos_overlay.py b/llvm_tools/copy_helpers_to_chromiumos_overlay.py
index 98f7b966..84716aad 100755
--- a/llvm_tools/copy_helpers_to_chromiumos_overlay.py
+++ b/llvm_tools/copy_helpers_to_chromiumos_overlay.py
@@ -1,6 +1,6 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
-# Copyright 2019 The Chromium OS Authors. All rights reserved.
+# Copyright 2019 The ChromiumOS Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
@@ -11,7 +11,6 @@ patch_manager ones). This script simplifies the copying of those around.
"""
# Necessary until crbug.com/1006448 is fixed
-from __future__ import print_function
import argparse
import os
@@ -20,48 +19,53 @@ import sys
def _find_repo_root(script_root):
- repo_root = os.path.abspath(os.path.join(script_root, '../../../../'))
- if not os.path.isdir(os.path.join(repo_root, '.repo')):
- return None
- return repo_root
+ repo_root = os.path.abspath(os.path.join(script_root, "../../../../"))
+ if not os.path.isdir(os.path.join(repo_root, ".repo")):
+ return None
+ return repo_root
def main():
- parser = argparse.ArgumentParser(description=__doc__)
- parser.add_argument(
- '--chroot_path',
- help="Path to where CrOS' source tree lives. Will autodetect if you're "
- 'running this from inside the CrOS source tree.')
- args = parser.parse_args()
+ parser = argparse.ArgumentParser(description=__doc__)
+ parser.add_argument(
+ "--chroot_path",
+ help="Path to where CrOS' source tree lives. Will autodetect if you're "
+ "running this from inside the CrOS source tree.",
+ )
+ args = parser.parse_args()
- my_dir = os.path.abspath(os.path.dirname(__file__))
+ my_dir = os.path.abspath(os.path.dirname(__file__))
- repo_root = args.chroot_path
- if repo_root is None:
- repo_root = _find_repo_root(my_dir)
+ repo_root = args.chroot_path
if repo_root is None:
- sys.exit("Couldn't detect the CrOS checkout root; please provide a "
- 'value for --chroot_path')
+ repo_root = _find_repo_root(my_dir)
+ if repo_root is None:
+ sys.exit(
+ "Couldn't detect the CrOS checkout root; please provide a "
+ "value for --chroot_path"
+ )
- chromiumos_overlay = os.path.join(repo_root,
- 'src/third_party/chromiumos-overlay')
+ chromiumos_overlay = os.path.join(
+ repo_root, "src/third_party/chromiumos-overlay"
+ )
- clone_files = [
- 'failure_modes.py',
- 'get_llvm_hash.py',
- 'git_llvm_rev.py',
- 'patch_manager.py',
- 'subprocess_helpers.py',
- ]
+ clone_files = [
+ "failure_modes.py",
+ "get_llvm_hash.py",
+ "git_llvm_rev.py",
+ "patch_manager.py",
+ "subprocess_helpers.py",
+ ]
- filesdir = os.path.join(chromiumos_overlay,
- 'sys-devel/llvm/files/patch_manager')
- for f in clone_files:
- source = os.path.join(my_dir, f)
- dest = os.path.join(filesdir, f)
- print('%r => %r' % (source, dest))
- shutil.copyfile(source, dest)
+ filesdir = os.path.join(
+ chromiumos_overlay, "sys-devel/llvm/files/patch_manager"
+ )
+ for f in clone_files:
+ source = os.path.join(my_dir, f)
+ dest = os.path.join(filesdir, f)
+ print("%r => %r" % (source, dest))
+ shutil.copyfile(source, dest)
-if __name__ == '__main__':
- main()
+if __name__ == "__main__":
+ main()
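
The copy is one-directional: the five `clone_files` above are copied from `llvm_tools/` into `sys-devel/llvm/files/patch_manager` under chromiumos-overlay. A hedged invocation sketch; the checkout path is hypothetical, and omitting `--chroot_path` lets the script autodetect the `.repo` root four directories up:

    import subprocess

    # Run from llvm_tools/; copies the helper scripts into the overlay.
    subprocess.run(
        [
            "./copy_helpers_to_chromiumos_overlay.py",
            "--chroot_path",
            "/home/user/chromiumos",  # hypothetical CrOS checkout
        ],
        check=True,
    )
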
diff --git a/llvm_tools/custom_script_example.py b/llvm_tools/custom_script_example.py
index 38dff007..5a320b41 100755
--- a/llvm_tools/custom_script_example.py
+++ b/llvm_tools/custom_script_example.py
@@ -1,12 +1,11 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
-# Copyright 2019 The Chromium OS Authors. All rights reserved.
+# Copyright 2019 The ChromiumOS Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""A custom script example that utilizes the .JSON contents of the tryjob."""
-from __future__ import print_function
import json
import sys
@@ -15,58 +14,61 @@ from update_tryjob_status import TryjobStatus
def main():
- """Determines the exit code based off of the contents of the .JSON file."""
-
- # Index 1 in 'sys.argv' is the path to the .JSON file which contains
- # the contents of the tryjob.
- #
- # Format of the tryjob contents:
- # {
- # "status" : [TRYJOB_STATUS],
- # "buildbucket_id" : [BUILDBUCKET_ID],
- # "extra_cls" : [A_LIST_OF_EXTRA_CLS_PASSED_TO_TRYJOB],
- # "url" : [GERRIT_URL],
- # "builder" : [TRYJOB_BUILDER_LIST],
- # "rev" : [REVISION],
- # "link" : [LINK_TO_TRYJOB],
- # "options" : [A_LIST_OF_OPTIONS_PASSED_TO_TRYJOB]
- # }
- abs_path_json_file = sys.argv[1]
-
- with open(abs_path_json_file) as f:
- tryjob_contents = json.load(f)
-
- CUTOFF_PENDING_REVISION = 369416
-
- SKIP_REVISION_CUTOFF_START = 369420
- SKIP_REVISION_CUTOFF_END = 369428
-
- if tryjob_contents['status'] == TryjobStatus.PENDING.value:
- if tryjob_contents['rev'] <= CUTOFF_PENDING_REVISION:
- # Exit code 0 means to set the tryjob 'status' as 'good'.
- sys.exit(0)
-
- # Exit code 124 means to set the tryjob 'status' as 'bad'.
- sys.exit(124)
-
- if tryjob_contents['status'] == TryjobStatus.BAD.value:
- # Need to take a closer look at the contents of the tryjob to then decide
- # what that tryjob's 'status' value should be.
- #
- # Since the exit code is not in the mapping, an exception will occur which
- # will save the file in the directory of this custom script example.
- sys.exit(1)
-
- if tryjob_contents['status'] == TryjobStatus.SKIP.value:
- # Validate that the 'skip value is really set between the cutoffs.
- if SKIP_REVISION_CUTOFF_START < tryjob_contents['rev'] < \
- SKIP_REVISION_CUTOFF_END:
- # Exit code 125 means to set the tryjob 'status' as 'skip'.
- sys.exit(125)
-
- if tryjob_contents['rev'] >= SKIP_REVISION_CUTOFF_END:
- sys.exit(124)
+ """Determines the exit code based off of the contents of the .JSON file."""
-
-if __name__ == '__main__':
- main()
+ # Index 1 in 'sys.argv' is the path to the .JSON file which contains
+ # the contents of the tryjob.
+ #
+ # Format of the tryjob contents:
+ # {
+ # "status" : [TRYJOB_STATUS],
+ # "buildbucket_id" : [BUILDBUCKET_ID],
+ # "extra_cls" : [A_LIST_OF_EXTRA_CLS_PASSED_TO_TRYJOB],
+ # "url" : [GERRIT_URL],
+ # "builder" : [TRYJOB_BUILDER_LIST],
+ # "rev" : [REVISION],
+ # "link" : [LINK_TO_TRYJOB],
+ # "options" : [A_LIST_OF_OPTIONS_PASSED_TO_TRYJOB]
+ # }
+ abs_path_json_file = sys.argv[1]
+
+ with open(abs_path_json_file) as f:
+ tryjob_contents = json.load(f)
+
+ CUTOFF_PENDING_REVISION = 369416
+
+ SKIP_REVISION_CUTOFF_START = 369420
+ SKIP_REVISION_CUTOFF_END = 369428
+
+ if tryjob_contents["status"] == TryjobStatus.PENDING.value:
+ if tryjob_contents["rev"] <= CUTOFF_PENDING_REVISION:
+ # Exit code 0 means to set the tryjob 'status' as 'good'.
+ sys.exit(0)
+
+ # Exit code 124 means to set the tryjob 'status' as 'bad'.
+ sys.exit(124)
+
+ if tryjob_contents["status"] == TryjobStatus.BAD.value:
+ # Need to take a closer look at the contents of the tryjob to then decide
+ # what that tryjob's 'status' value should be.
+ #
+ # Since the exit code is not in the mapping, an exception will occur which
+ # will save the file in the directory of this custom script example.
+ sys.exit(1)
+
+ if tryjob_contents["status"] == TryjobStatus.SKIP.value:
+        # Validate that the 'skip' value is really set between the cutoffs.
+ if (
+ SKIP_REVISION_CUTOFF_START
+ < tryjob_contents["rev"]
+ < SKIP_REVISION_CUTOFF_END
+ ):
+ # Exit code 125 means to set the tryjob 'status' as 'skip'.
+ sys.exit(125)
+
+ if tryjob_contents["rev"] >= SKIP_REVISION_CUTOFF_END:
+ sys.exit(124)
+
+
+if __name__ == "__main__":
+ main()
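
The exit-code mapping above is the script's whole interface: 0 marks the tryjob 'good', 124 'bad', 125 'skip', and an unmapped code such as 1 makes the caller keep the JSON file for inspection. A sketch of driving it with a pending tryjob below the cutoff, assuming `TryjobStatus.PENDING.value` is the string "pending"; the other field values are hypothetical placeholders:

    import json
    import subprocess

    tryjob = {
        "status": "pending",  # assumed TryjobStatus.PENDING.value
        "buildbucket_id": 1234,
        "extra_cls": [],
        "url": "https://example-review.invalid",  # hypothetical
        "builder": ["some-builder"],
        "rev": 369000,  # <= CUTOFF_PENDING_REVISION (369416)
        "link": "https://example-ci.invalid/1234",  # hypothetical
        "options": [],
    }
    with open("/tmp/tryjob.json", "w") as f:
        json.dump(tryjob, f)

    # Exit code 0 tells the caller to mark the tryjob 'good'.
    result = subprocess.run(["./custom_script_example.py", "/tmp/tryjob.json"])
    assert result.returncode == 0
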
diff --git a/llvm_tools/failure_modes.py b/llvm_tools/failure_modes.py
index 1e05dfcf..fc4e1fc2 100644
--- a/llvm_tools/failure_modes.py
+++ b/llvm_tools/failure_modes.py
@@ -1,23 +1,22 @@
# -*- coding: utf-8 -*-
-# Copyright 2019 The Chromium OS Authors. All rights reserved.
+# Copyright 2019 The ChromiumOS Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Failure mode constants avaiable to the patch manager."""
-from __future__ import print_function
import enum
class FailureModes(enum.Enum):
- """Different modes for the patch manager when handling a failed patch."""
+ """Different modes for the patch manager when handling a failed patch."""
- FAIL = 'fail'
- CONTINUE = 'continue'
- DISABLE_PATCHES = 'disable_patches'
- BISECT_PATCHES = 'bisect_patches'
- REMOVE_PATCHES = 'remove_patches'
+ FAIL = "fail"
+ CONTINUE = "continue"
+ DISABLE_PATCHES = "disable_patches"
+ BISECT_PATCHES = "bisect_patches"
+ REMOVE_PATCHES = "remove_patches"
- # Only used by 'bisect_patches'.
- INTERNAL_BISECTION = 'internal_bisection'
+ # Only used by 'bisect_patches'.
+ INTERNAL_BISECTION = "internal_bisection"
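
Because the enum values are the literal mode strings, command-line input round-trips through the constructor, as in this tiny sketch:

    from failure_modes import FailureModes

    # Value-based lookup; raises ValueError for unknown strings.
    mode = FailureModes("bisect_patches")
    assert mode is FailureModes.BISECT_PATCHES
    assert mode.value == "bisect_patches"
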
diff --git a/llvm_tools/fetch_cros_sdk_rolls.py b/llvm_tools/fetch_cros_sdk_rolls.py
index 83d7025a..dc678e10 100755
--- a/llvm_tools/fetch_cros_sdk_rolls.py
+++ b/llvm_tools/fetch_cros_sdk_rolls.py
@@ -1,5 +1,5 @@
#!/usr/bin/env python3
-# Copyright 2020 The Chromium OS Authors. All rights reserved.
+# Copyright 2020 The ChromiumOS Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
@@ -14,97 +14,101 @@ import argparse
import json
import logging
import os
+from pathlib import Path
import shutil
import subprocess
import sys
import tempfile
from typing import Dict, List
-from pathlib import Path
def fetch_all_sdk_manifest_paths() -> List[str]:
- """Fetches all paths of SDK manifests; newer = later in the return value."""
- results = subprocess.run(
- ['gsutil', 'ls', 'gs://chromiumos-sdk/cros-sdk-20??.*.Manifest'],
- check=True,
- stdout=subprocess.PIPE,
- encoding='utf-8',
- ).stdout
- # These are named so that sorted order == newest last.
- return sorted(x.strip() for x in results.splitlines())
+ """Fetches all paths of SDK manifests; newer = later in the return value."""
+ results = subprocess.run(
+ ["gsutil", "ls", "gs://chromiumos-sdk/cros-sdk-20??.*.Manifest"],
+ check=True,
+ stdout=subprocess.PIPE,
+ encoding="utf-8",
+ ).stdout
+ # These are named so that sorted order == newest last.
+ return sorted(x.strip() for x in results.splitlines())
def fetch_manifests_into(into_dir: Path, manifests: List[str]):
- # Wrap this in a `try` block because gsutil likes to print to stdout *and*
- # stderr even on success, so we silence them & only print on failure.
- try:
- subprocess.run(
- [
- 'gsutil',
- '-m',
- 'cp',
- '-I',
- str(into_dir),
- ],
- check=True,
- input='\n'.join(manifests),
- stdout=subprocess.PIPE,
- stderr=subprocess.STDOUT,
- encoding='utf-8',
- )
- except subprocess.CalledProcessError as e:
- logging.exception('gsutil failed; output:\n%s', e.stdout)
+ # Wrap this in a `try` block because gsutil likes to print to stdout *and*
+ # stderr even on success, so we silence them & only print on failure.
+ try:
+ subprocess.run(
+ [
+ "gsutil",
+ "-m",
+ "cp",
+ "-I",
+ str(into_dir),
+ ],
+ check=True,
+ input="\n".join(manifests),
+ stdout=subprocess.PIPE,
+ stderr=subprocess.STDOUT,
+ encoding="utf-8",
+ )
+ except subprocess.CalledProcessError as e:
+ logging.exception("gsutil failed; output:\n%s", e.stdout)
def load_manifest_versions(manifest: Path) -> Dict[str, str]:
- with manifest.open(encoding='utf-8') as f:
- raw_versions = json.load(f)
+ with manifest.open(encoding="utf-8") as f:
+ raw_versions = json.load(f)
- # We get a dict of list of lists of versions and some other metadata, e.g.
- # {"foo/bar": [["1.2.3", {}]]}
- # Trim out the metadata.
- return {k: v[0][0] for k, v in raw_versions['packages'].items()}
+ # We get a dict of list of lists of versions and some other metadata, e.g.
+ # {"foo/bar": [["1.2.3", {}]]}
+ # Trim out the metadata.
+ return {k: v[0][0] for k, v in raw_versions["packages"].items()}
def main():
- parser = argparse.ArgumentParser(
- description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter)
- parser.add_argument(
- '-d', '--debug', action='store_true', help='Emit debugging output')
- parser.add_argument(
- '-n',
- '--number',
- type=int,
- default=20,
- help='Number of recent manifests to fetch info about. 0 means unlimited.')
- args = parser.parse_args()
-
- is_debug = args.debug
- logging.basicConfig(level=logging.DEBUG if is_debug else logging.INFO)
-
- logging.debug('Fetching SDK manifests')
- manifest_paths = fetch_all_sdk_manifest_paths()
- logging.debug('%d SDK manifests fetched', len(manifest_paths))
-
- number = args.number
- if number:
- manifest_paths = manifest_paths[-number:]
-
- tempdir = Path(tempfile.mkdtemp(prefix='cros-sdk-rolls'))
- try:
- logging.debug('Working in tempdir %r', tempdir)
- fetch_manifests_into(tempdir, manifest_paths)
-
- for path in manifest_paths:
- basename = os.path.basename(path)
- versions = load_manifest_versions(tempdir.joinpath(basename))
- print(f'{basename}: {versions["sys-devel/llvm"]}')
- finally:
- if is_debug:
- logging.debug('Keeping around tempdir %r to aid debugging', tempdir)
- else:
- shutil.rmtree(tempdir)
-
-
-if __name__ == '__main__':
- sys.exit(main())
+ parser = argparse.ArgumentParser(
+ description=__doc__,
+ formatter_class=argparse.RawDescriptionHelpFormatter,
+ )
+ parser.add_argument(
+ "-d", "--debug", action="store_true", help="Emit debugging output"
+ )
+ parser.add_argument(
+ "-n",
+ "--number",
+ type=int,
+ default=20,
+ help="Number of recent manifests to fetch info about. 0 means unlimited.",
+ )
+ args = parser.parse_args()
+
+ is_debug = args.debug
+ logging.basicConfig(level=logging.DEBUG if is_debug else logging.INFO)
+
+ logging.debug("Fetching SDK manifests")
+ manifest_paths = fetch_all_sdk_manifest_paths()
+ logging.debug("%d SDK manifests fetched", len(manifest_paths))
+
+ number = args.number
+ if number:
+ manifest_paths = manifest_paths[-number:]
+
+ tempdir = Path(tempfile.mkdtemp(prefix="cros-sdk-rolls"))
+ try:
+ logging.debug("Working in tempdir %r", tempdir)
+ fetch_manifests_into(tempdir, manifest_paths)
+
+ for path in manifest_paths:
+ basename = os.path.basename(path)
+ versions = load_manifest_versions(tempdir.joinpath(basename))
+ print(f'{basename}: {versions["sys-devel/llvm"]}')
+ finally:
+ if is_debug:
+ logging.debug("Keeping around tempdir %r to aid debugging", tempdir)
+ else:
+ shutil.rmtree(tempdir)
+
+
+if __name__ == "__main__":
+ sys.exit(main())
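
As the comment in `load_manifest_versions` notes, a manifest maps each package to `[[version, metadata]]`; the helper keeps only the first version string. A minimal sketch against a manifest fetched by hand, with a hypothetical filename matching the `cros-sdk-20??.*.Manifest` glob above:

    from pathlib import Path

    from fetch_cros_sdk_rolls import load_manifest_versions

    manifest = Path("cros-sdk-2022.06.01.000000.Manifest")  # hypothetical
    versions = load_manifest_versions(manifest)
    # The same lookup main() performs for each downloaded manifest.
    print(versions["sys-devel/llvm"])
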
diff --git a/llvm_tools/get_llvm_hash.py b/llvm_tools/get_llvm_hash.py
index 83b5ae76..fee8e4f6 100755
--- a/llvm_tools/get_llvm_hash.py
+++ b/llvm_tools/get_llvm_hash.py
@@ -1,12 +1,11 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
-# Copyright 2019 The Chromium OS Authors. All rights reserved.
+# Copyright 2019 The ChromiumOS Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Returns the latest LLVM version's hash."""
-from __future__ import print_function
import argparse
import contextlib
@@ -22,386 +21,422 @@ import git_llvm_rev
from subprocess_helpers import check_output
from subprocess_helpers import CheckCommand
-_LLVM_GIT_URL = ('https://chromium.googlesource.com/external/github.com/llvm'
- '/llvm-project')
-KNOWN_HASH_SOURCES = {'google3', 'google3-unstable', 'tot'}
+_LLVM_GIT_URL = (
+ "https://chromium.googlesource.com/external/github.com/llvm" "/llvm-project"
+)
+
+KNOWN_HASH_SOURCES = {"google3", "google3-unstable", "tot"}
def GetVersionFrom(src_dir, git_hash):
- """Obtain an SVN-style version number based on the LLVM git hash passed in.
+ """Obtain an SVN-style version number based on the LLVM git hash passed in.
- Args:
- src_dir: LLVM's source directory.
- git_hash: The git hash.
+ Args:
+ src_dir: LLVM's source directory.
+ git_hash: The git hash.
- Returns:
- An SVN-style version number associated with the git hash.
- """
+ Returns:
+ An SVN-style version number associated with the git hash.
+ """
- version = git_llvm_rev.translate_sha_to_rev(
- git_llvm_rev.LLVMConfig(remote='origin', dir=src_dir), git_hash)
- # Note: branches aren't supported
- assert version.branch == git_llvm_rev.MAIN_BRANCH, version.branch
- return version.number
+ version = git_llvm_rev.translate_sha_to_rev(
+ git_llvm_rev.LLVMConfig(remote="origin", dir=src_dir), git_hash
+ )
+ # Note: branches aren't supported
+ assert version.branch == git_llvm_rev.MAIN_BRANCH, version.branch
+ return version.number
def GetGitHashFrom(src_dir, version):
- """Finds the commit hash(es) of the LLVM version in the git log history.
+ """Finds the commit hash(es) of the LLVM version in the git log history.
- Args:
- src_dir: The LLVM source tree.
- version: The version number.
+ Args:
+ src_dir: The LLVM source tree.
+ version: The version number.
- Returns:
- A git hash string corresponding to the version number.
+ Returns:
+ A git hash string corresponding to the version number.
- Raises:
- subprocess.CalledProcessError: Failed to find a git hash.
- """
+ Raises:
+ subprocess.CalledProcessError: Failed to find a git hash.
+ """
- return git_llvm_rev.translate_rev_to_sha(
- git_llvm_rev.LLVMConfig(remote='origin', dir=src_dir),
- git_llvm_rev.Rev(branch=git_llvm_rev.MAIN_BRANCH, number=version))
+ return git_llvm_rev.translate_rev_to_sha(
+ git_llvm_rev.LLVMConfig(remote="origin", dir=src_dir),
+ git_llvm_rev.Rev(branch=git_llvm_rev.MAIN_BRANCH, number=version),
+ )
def CheckoutBranch(src_dir, branch):
- """Checks out and pulls from a branch in a git repo.
+ """Checks out and pulls from a branch in a git repo.
- Args:
- src_dir: The LLVM source tree.
- branch: The git branch to checkout in src_dir.
+ Args:
+ src_dir: The LLVM source tree.
+ branch: The git branch to checkout in src_dir.
- Raises:
- ValueError: Failed to checkout or pull branch version
- """
- CheckCommand(['git', '-C', src_dir, 'checkout', branch])
- CheckCommand(['git', '-C', src_dir, 'pull'])
+ Raises:
+ ValueError: Failed to checkout or pull branch version
+ """
+ CheckCommand(["git", "-C", src_dir, "checkout", branch])
+ CheckCommand(["git", "-C", src_dir, "pull"])
def ParseLLVMMajorVersion(cmakelist):
- """Reads CMakeList.txt file contents for LLVMMajor Version.
+ """Reads CMakeList.txt file contents for LLVMMajor Version.
- Args:
- cmakelist: contents of CMakeList.txt
+ Args:
+ cmakelist: contents of CMakeList.txt
- Returns:
- The major version number as a string
+ Returns:
+ The major version number as a string
- Raises:
- ValueError: The major version cannot be parsed from cmakelist
- """
- match = re.search(r'\n\s+set\(LLVM_VERSION_MAJOR (?P<major>\d+)\)', cmakelist)
- if not match:
- raise ValueError('Failed to parse CMakeList for llvm major version')
- return match.group('major')
+ Raises:
+ ValueError: The major version cannot be parsed from cmakelist
+ """
+ match = re.search(
+ r"\n\s+set\(LLVM_VERSION_MAJOR (?P<major>\d+)\)", cmakelist
+ )
+ if not match:
+ raise ValueError("Failed to parse CMakeList for llvm major version")
+ return match.group("major")
@functools.lru_cache(maxsize=1)
def GetLLVMMajorVersion(git_hash=None):
- """Reads llvm/CMakeList.txt file contents for LLVMMajor Version.
-
- Args:
- git_hash: git hash of llvm version as string or None for top of trunk
-
- Returns:
- The major version number as a string
-
- Raises:
- ValueError: The major version cannot be parsed from cmakelist or
- there was a failure to checkout git_hash version
- FileExistsError: The src directory doe not contain CMakeList.txt
- """
- src_dir = GetAndUpdateLLVMProjectInLLVMTools()
- cmakelists_path = os.path.join(src_dir, 'llvm', 'CMakeLists.txt')
- if git_hash:
- CheckCommand(['git', '-C', src_dir, 'checkout', git_hash])
- try:
- with open(cmakelists_path) as cmakelists_file:
- return ParseLLVMMajorVersion(cmakelists_file.read())
- finally:
+ """Reads llvm/CMakeList.txt file contents for LLVMMajor Version.
+
+ Args:
+ git_hash: git hash of llvm version as string or None for top of trunk
+
+ Returns:
+ The major version number as a string
+
+ Raises:
+ ValueError: The major version cannot be parsed from cmakelist or
+ there was a failure to checkout git_hash version
+      FileExistsError: The src directory does not contain CMakeLists.txt
+ """
+ src_dir = GetAndUpdateLLVMProjectInLLVMTools()
+ cmakelists_path = os.path.join(src_dir, "llvm", "CMakeLists.txt")
if git_hash:
- CheckoutBranch(src_dir, git_llvm_rev.MAIN_BRANCH)
+ CheckCommand(["git", "-C", src_dir, "checkout", git_hash])
+ try:
+ with open(cmakelists_path) as cmakelists_file:
+ return ParseLLVMMajorVersion(cmakelists_file.read())
+ finally:
+ if git_hash:
+ CheckoutBranch(src_dir, git_llvm_rev.MAIN_BRANCH)
@contextlib.contextmanager
def CreateTempLLVMRepo(temp_dir):
- """Adds a LLVM worktree to 'temp_dir'.
+ """Adds a LLVM worktree to 'temp_dir'.
- Creating a worktree because the LLVM source tree in
- '../toolchain-utils/llvm_tools/llvm-project-copy' should not be modified.
+ Creating a worktree because the LLVM source tree in
+ '../toolchain-utils/llvm_tools/llvm-project-copy' should not be modified.
- This is useful for applying patches to a source tree but do not want to modify
- the actual LLVM source tree in 'llvm-project-copy'.
+    This is useful for applying patches to a source tree without modifying
+    the actual LLVM source tree in 'llvm-project-copy'.
- Args:
- temp_dir: An absolute path to the temporary directory to put the worktree in
- (obtained via 'tempfile.mkdtemp()').
+ Args:
+ temp_dir: An absolute path to the temporary directory to put the worktree in
+ (obtained via 'tempfile.mkdtemp()').
- Yields:
- The absolute path to 'temp_dir'.
+ Yields:
+ The absolute path to 'temp_dir'.
- Raises:
- subprocess.CalledProcessError: Failed to remove the worktree.
- ValueError: Failed to add a worktree.
- """
+ Raises:
+ subprocess.CalledProcessError: Failed to remove the worktree.
+ ValueError: Failed to add a worktree.
+ """
- abs_path_to_llvm_project_dir = GetAndUpdateLLVMProjectInLLVMTools()
- CheckCommand([
- 'git', '-C', abs_path_to_llvm_project_dir, 'worktree', 'add', '--detach',
- temp_dir,
- 'origin/%s' % git_llvm_rev.MAIN_BRANCH
- ])
+ abs_path_to_llvm_project_dir = GetAndUpdateLLVMProjectInLLVMTools()
+ CheckCommand(
+ [
+ "git",
+ "-C",
+ abs_path_to_llvm_project_dir,
+ "worktree",
+ "add",
+ "--detach",
+ temp_dir,
+ "origin/%s" % git_llvm_rev.MAIN_BRANCH,
+ ]
+ )
- try:
- yield temp_dir
- finally:
- if os.path.isdir(temp_dir):
- check_output([
- 'git', '-C', abs_path_to_llvm_project_dir, 'worktree', 'remove', '-f',
- temp_dir
- ])
+ try:
+ yield temp_dir
+ finally:
+ if os.path.isdir(temp_dir):
+ check_output(
+ [
+ "git",
+ "-C",
+ abs_path_to_llvm_project_dir,
+ "worktree",
+ "remove",
+ "-f",
+ temp_dir,
+ ]
+ )
def GetAndUpdateLLVMProjectInLLVMTools():
- """Gets the absolute path to 'llvm-project-copy' directory in 'llvm_tools'.
+ """Gets the absolute path to 'llvm-project-copy' directory in 'llvm_tools'.
- The intent of this function is to avoid cloning the LLVM repo and then
- discarding the contents of the repo. The function will create a directory
- in '../toolchain-utils/llvm_tools' called 'llvm-project-copy' if this
- directory does not exist yet. If it does not exist, then it will use the
- LLVMHash() class to clone the LLVM repo into 'llvm-project-copy'. Otherwise,
- it will clean the contents of that directory and then fetch from the chromium
- LLVM mirror. In either case, this function will return the absolute path to
- 'llvm-project-copy' directory.
+ The intent of this function is to avoid cloning the LLVM repo and then
+ discarding the contents of the repo. The function will create a directory
+ in '../toolchain-utils/llvm_tools' called 'llvm-project-copy' if this
+ directory does not exist yet. If it does not exist, then it will use the
+ LLVMHash() class to clone the LLVM repo into 'llvm-project-copy'. Otherwise,
+ it will clean the contents of that directory and then fetch from the chromium
+ LLVM mirror. In either case, this function will return the absolute path to
+ 'llvm-project-copy' directory.
- Returns:
- Absolute path to 'llvm-project-copy' directory in 'llvm_tools'
+ Returns:
+ Absolute path to 'llvm-project-copy' directory in 'llvm_tools'
- Raises:
- ValueError: LLVM repo (in 'llvm-project-copy' dir.) has changes or failed to
- checkout to main or failed to fetch from chromium mirror of LLVM.
- """
+ Raises:
+ ValueError: LLVM repo (in 'llvm-project-copy' dir.) has changes or failed to
+ checkout to main or failed to fetch from chromium mirror of LLVM.
+ """
- abs_path_to_llvm_tools_dir = os.path.dirname(os.path.abspath(__file__))
+ abs_path_to_llvm_tools_dir = os.path.dirname(os.path.abspath(__file__))
- abs_path_to_llvm_project_dir = os.path.join(abs_path_to_llvm_tools_dir,
- 'llvm-project-copy')
+ abs_path_to_llvm_project_dir = os.path.join(
+ abs_path_to_llvm_tools_dir, "llvm-project-copy"
+ )
- if not os.path.isdir(abs_path_to_llvm_project_dir):
- print(
- (f'Checking out LLVM to {abs_path_to_llvm_project_dir}\n'
- 'so that we can map between commit hashes and revision numbers.\n'
- 'This may take a while, but only has to be done once.'),
- file=sys.stderr)
- os.mkdir(abs_path_to_llvm_project_dir)
+ if not os.path.isdir(abs_path_to_llvm_project_dir):
+ print(
+ (
+ f"Checking out LLVM to {abs_path_to_llvm_project_dir}\n"
+ "so that we can map between commit hashes and revision numbers.\n"
+ "This may take a while, but only has to be done once."
+ ),
+ file=sys.stderr,
+ )
+ os.mkdir(abs_path_to_llvm_project_dir)
- LLVMHash().CloneLLVMRepo(abs_path_to_llvm_project_dir)
- else:
- # `git status` has a '-s'/'--short' option that shortens the output.
- # With the '-s' option, if no changes were made to the LLVM repo, then the
- # output (assigned to 'repo_status') would be empty.
- repo_status = check_output(
- ['git', '-C', abs_path_to_llvm_project_dir, 'status', '-s'])
+ LLVMHash().CloneLLVMRepo(abs_path_to_llvm_project_dir)
+ else:
+ # `git status` has a '-s'/'--short' option that shortens the output.
+ # With the '-s' option, if no changes were made to the LLVM repo, then the
+ # output (assigned to 'repo_status') would be empty.
+ repo_status = check_output(
+ ["git", "-C", abs_path_to_llvm_project_dir, "status", "-s"]
+ )
- if repo_status.rstrip():
- raise ValueError('LLVM repo in %s has changes, please remove.' %
- abs_path_to_llvm_project_dir)
+ if repo_status.rstrip():
+ raise ValueError(
+ "LLVM repo in %s has changes, please remove."
+ % abs_path_to_llvm_project_dir
+ )
- CheckoutBranch(abs_path_to_llvm_project_dir, git_llvm_rev.MAIN_BRANCH)
+ CheckoutBranch(abs_path_to_llvm_project_dir, git_llvm_rev.MAIN_BRANCH)
- return abs_path_to_llvm_project_dir
+ return abs_path_to_llvm_project_dir
def GetGoogle3LLVMVersion(stable):
- """Gets the latest google3 LLVM version.
+ """Gets the latest google3 LLVM version.
- Args:
- stable: boolean, use the stable version or the unstable version
+ Args:
+ stable: boolean, use the stable version or the unstable version
- Returns:
- The latest LLVM SVN version as an integer.
+ Returns:
+ The latest LLVM SVN version as an integer.
- Raises:
- subprocess.CalledProcessError: An invalid path has been provided to the
- `cat` command.
- """
+ Raises:
+ subprocess.CalledProcessError: An invalid path has been provided to the
+ `cat` command.
+ """
- subdir = 'stable' if stable else 'llvm_unstable'
+ subdir = "stable" if stable else "llvm_unstable"
- # Cmd to get latest google3 LLVM version.
- cmd = [
- 'cat',
- os.path.join('/google/src/head/depot/google3/third_party/crosstool/v18',
- subdir, 'installs/llvm/git_origin_rev_id')
- ]
+ # Cmd to get latest google3 LLVM version.
+ cmd = [
+ "cat",
+ os.path.join(
+ "/google/src/head/depot/google3/third_party/crosstool/v18",
+ subdir,
+ "installs/llvm/git_origin_rev_id",
+ ),
+ ]
- # Get latest version.
- git_hash = check_output(cmd)
+ # Get latest version.
+ git_hash = check_output(cmd)
- # Change type to an integer
- return GetVersionFrom(GetAndUpdateLLVMProjectInLLVMTools(), git_hash.rstrip())
+ # Change type to an integer
+ return GetVersionFrom(
+ GetAndUpdateLLVMProjectInLLVMTools(), git_hash.rstrip()
+ )
def IsSvnOption(svn_option):
- """Validates whether the argument (string) is a git hash option.
+ """Validates whether the argument (string) is a git hash option.
- The argument is used to find the git hash of LLVM.
+ The argument is used to find the git hash of LLVM.
- Args:
- svn_option: The option passed in as a command line argument.
+ Args:
+ svn_option: The option passed in as a command line argument.
- Returns:
- lowercase svn_option if it is a known hash source, otherwise the svn_option
- as an int
+ Returns:
+ lowercase svn_option if it is a known hash source, otherwise the svn_option
+ as an int
- Raises:
- ValueError: Invalid svn option provided.
- """
+ Raises:
+ ValueError: Invalid svn option provided.
+ """
- if svn_option.lower() in KNOWN_HASH_SOURCES:
- return svn_option.lower()
+ if svn_option.lower() in KNOWN_HASH_SOURCES:
+ return svn_option.lower()
- try:
- svn_version = int(svn_option)
+ try:
+ svn_version = int(svn_option)
- return svn_version
+ return svn_version
- # Unable to convert argument to an int, so the option is invalid.
- #
- # Ex: 'one'.
- except ValueError:
- pass
+ # Unable to convert argument to an int, so the option is invalid.
+ #
+ # Ex: 'one'.
+ except ValueError:
+ pass
- raise ValueError('Invalid LLVM git hash option provided: %s' % svn_option)
+ raise ValueError("Invalid LLVM git hash option provided: %s" % svn_option)
def GetLLVMHashAndVersionFromSVNOption(svn_option):
- """Gets the LLVM hash and LLVM version based off of the svn option.
+ """Gets the LLVM hash and LLVM version based off of the svn option.
- Args:
- svn_option: A valid svn option obtained from the command line.
- Ex. 'google3', 'tot', or <svn_version> such as 365123.
+ Args:
+ svn_option: A valid svn option obtained from the command line.
+ Ex. 'google3', 'tot', or <svn_version> such as 365123.
- Returns:
- A tuple that is the LLVM git hash and LLVM version.
- """
+ Returns:
+ A tuple that is the LLVM git hash and LLVM version.
+ """
- new_llvm_hash = LLVMHash()
+ new_llvm_hash = LLVMHash()
- # Determine which LLVM git hash to retrieve.
- if svn_option == 'tot':
- git_hash = new_llvm_hash.GetTopOfTrunkGitHash()
- version = GetVersionFrom(GetAndUpdateLLVMProjectInLLVMTools(), git_hash)
- elif isinstance(svn_option, int):
- version = svn_option
- git_hash = GetGitHashFrom(GetAndUpdateLLVMProjectInLLVMTools(), version)
- else:
- assert svn_option in ('google3', 'google3-unstable')
- version = GetGoogle3LLVMVersion(stable=svn_option == 'google3')
+ # Determine which LLVM git hash to retrieve.
+ if svn_option == "tot":
+ git_hash = new_llvm_hash.GetTopOfTrunkGitHash()
+ version = GetVersionFrom(GetAndUpdateLLVMProjectInLLVMTools(), git_hash)
+ elif isinstance(svn_option, int):
+ version = svn_option
+ git_hash = GetGitHashFrom(GetAndUpdateLLVMProjectInLLVMTools(), version)
+ else:
+ assert svn_option in ("google3", "google3-unstable")
+ version = GetGoogle3LLVMVersion(stable=svn_option == "google3")
- git_hash = GetGitHashFrom(GetAndUpdateLLVMProjectInLLVMTools(), version)
+ git_hash = GetGitHashFrom(GetAndUpdateLLVMProjectInLLVMTools(), version)
- return git_hash, version
+ return git_hash, version
class LLVMHash(object):
- """Provides methods to retrieve a LLVM hash."""
+ """Provides methods to retrieve a LLVM hash."""
- @staticmethod
- @contextlib.contextmanager
- def CreateTempDirectory():
- temp_dir = tempfile.mkdtemp()
+ @staticmethod
+ @contextlib.contextmanager
+ def CreateTempDirectory():
+ temp_dir = tempfile.mkdtemp()
- try:
- yield temp_dir
- finally:
- if os.path.isdir(temp_dir):
- shutil.rmtree(temp_dir, ignore_errors=True)
+ try:
+ yield temp_dir
+ finally:
+ if os.path.isdir(temp_dir):
+ shutil.rmtree(temp_dir, ignore_errors=True)
- def CloneLLVMRepo(self, temp_dir):
- """Clones the LLVM repo.
+ def CloneLLVMRepo(self, temp_dir):
+ """Clones the LLVM repo.
- Args:
- temp_dir: The temporary directory to clone the repo to.
+ Args:
+ temp_dir: The temporary directory to clone the repo to.
- Raises:
- ValueError: Failed to clone the LLVM repo.
- """
+ Raises:
+ ValueError: Failed to clone the LLVM repo.
+ """
- clone_cmd = ['git', 'clone', _LLVM_GIT_URL, temp_dir]
+ clone_cmd = ["git", "clone", _LLVM_GIT_URL, temp_dir]
- clone_cmd_obj = subprocess.Popen(clone_cmd, stderr=subprocess.PIPE)
- _, stderr = clone_cmd_obj.communicate()
+ clone_cmd_obj = subprocess.Popen(clone_cmd, stderr=subprocess.PIPE)
+ _, stderr = clone_cmd_obj.communicate()
- if clone_cmd_obj.returncode:
- raise ValueError('Failed to clone the LLVM repo: %s' % stderr)
+ if clone_cmd_obj.returncode:
+ raise ValueError("Failed to clone the LLVM repo: %s" % stderr)
- def GetLLVMHash(self, version):
- """Retrieves the LLVM hash corresponding to the LLVM version passed in.
+ def GetLLVMHash(self, version):
+ """Retrieves the LLVM hash corresponding to the LLVM version passed in.
- Args:
- version: The LLVM version to use as a delimiter.
+ Args:
+ version: The LLVM version to use as a delimiter.
- Returns:
- The hash as a string that corresponds to the LLVM version.
- """
+ Returns:
+ The hash as a string that corresponds to the LLVM version.
+ """
- hash_value = GetGitHashFrom(GetAndUpdateLLVMProjectInLLVMTools(), version)
- return hash_value
+ hash_value = GetGitHashFrom(
+ GetAndUpdateLLVMProjectInLLVMTools(), version
+ )
+ return hash_value
- def GetGoogle3LLVMHash(self):
- """Retrieves the google3 LLVM hash."""
+ def GetGoogle3LLVMHash(self):
+ """Retrieves the google3 LLVM hash."""
- return self.GetLLVMHash(GetGoogle3LLVMVersion(stable=True))
+ return self.GetLLVMHash(GetGoogle3LLVMVersion(stable=True))
- def GetGoogle3UnstableLLVMHash(self):
- """Retrieves the LLVM hash of google3's unstable compiler."""
- return self.GetLLVMHash(GetGoogle3LLVMVersion(stable=False))
+ def GetGoogle3UnstableLLVMHash(self):
+ """Retrieves the LLVM hash of google3's unstable compiler."""
+ return self.GetLLVMHash(GetGoogle3LLVMVersion(stable=False))
- def GetTopOfTrunkGitHash(self):
- """Gets the latest git hash from top of trunk of LLVM."""
+ def GetTopOfTrunkGitHash(self):
+ """Gets the latest git hash from top of trunk of LLVM."""
- path_to_main_branch = 'refs/heads/main'
- llvm_tot_git_hash = check_output(
- ['git', 'ls-remote', _LLVM_GIT_URL, path_to_main_branch])
- return llvm_tot_git_hash.rstrip().split()[0]
+ path_to_main_branch = "refs/heads/main"
+ llvm_tot_git_hash = check_output(
+ ["git", "ls-remote", _LLVM_GIT_URL, path_to_main_branch]
+ )
+ return llvm_tot_git_hash.rstrip().split()[0]
def main():
- """Prints the git hash of LLVM.
-
- Parses the command line for the optional command line
- arguments.
- """
-
- # Create parser and add optional command-line arguments.
- parser = argparse.ArgumentParser(description='Finds the LLVM hash.')
- parser.add_argument(
- '--llvm_version',
- type=IsSvnOption,
- required=True,
- help='which git hash of LLVM to find. Either a svn revision, or one '
- 'of %s' % sorted(KNOWN_HASH_SOURCES))
-
- # Parse command-line arguments.
- args_output = parser.parse_args()
-
- cur_llvm_version = args_output.llvm_version
-
- new_llvm_hash = LLVMHash()
-
- if isinstance(cur_llvm_version, int):
- # Find the git hash of the specific LLVM version.
- print(new_llvm_hash.GetLLVMHash(cur_llvm_version))
- elif cur_llvm_version == 'google3':
- print(new_llvm_hash.GetGoogle3LLVMHash())
- elif cur_llvm_version == 'google3-unstable':
- print(new_llvm_hash.GetGoogle3UnstableLLVMHash())
- else:
- assert cur_llvm_version == 'tot'
- print(new_llvm_hash.GetTopOfTrunkGitHash())
-
-
-if __name__ == '__main__':
- main()
+ """Prints the git hash of LLVM.
+
+ Parses the command line for the optional command line
+ arguments.
+ """
+
+ # Create parser and add optional command-line arguments.
+ parser = argparse.ArgumentParser(description="Finds the LLVM hash.")
+ parser.add_argument(
+ "--llvm_version",
+ type=IsSvnOption,
+ required=True,
+ help="which git hash of LLVM to find. Either a svn revision, or one "
+ "of %s" % sorted(KNOWN_HASH_SOURCES),
+ )
+
+ # Parse command-line arguments.
+ args_output = parser.parse_args()
+
+ cur_llvm_version = args_output.llvm_version
+
+ new_llvm_hash = LLVMHash()
+
+ if isinstance(cur_llvm_version, int):
+ # Find the git hash of the specific LLVM version.
+ print(new_llvm_hash.GetLLVMHash(cur_llvm_version))
+ elif cur_llvm_version == "google3":
+ print(new_llvm_hash.GetGoogle3LLVMHash())
+ elif cur_llvm_version == "google3-unstable":
+ print(new_llvm_hash.GetGoogle3UnstableLLVMHash())
+ else:
+ assert cur_llvm_version == "tot"
+ print(new_llvm_hash.GetTopOfTrunkGitHash())
+
+
+if __name__ == "__main__":
+ main()
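
Since `IsSvnOption` is the argparse `type=` callback, `--llvm_version` accepts either a numeric SVN-style revision or one of `KNOWN_HASH_SOURCES`. A library-level sketch of the same resolution; the revision number is only an example, and the first call may clone `llvm-project-copy`, which is slow:

    import get_llvm_hash

    # Strings are validated: known sources pass through lowercased,
    # anything else must parse as an int.
    assert get_llvm_hash.IsSvnOption("tot") == "tot"
    assert get_llvm_hash.IsSvnOption("365123") == 365123

    # Resolves an svn option to (git_hash, svn_version), cloning or
    # updating the local llvm-project-copy checkout as needed.
    git_hash, version = get_llvm_hash.GetLLVMHashAndVersionFromSVNOption(365123)
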
diff --git a/llvm_tools/get_llvm_hash_unittest.py b/llvm_tools/get_llvm_hash_unittest.py
index 49740f33..17a094b4 100755
--- a/llvm_tools/get_llvm_hash_unittest.py
+++ b/llvm_tools/get_llvm_hash_unittest.py
@@ -1,12 +1,11 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
-# Copyright 2019 The Chromium OS Authors. All rights reserved.
+# Copyright 2019 The ChromiumOS Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Unit tests for retrieving the LLVM hash."""
-from __future__ import print_function
import subprocess
import unittest
@@ -15,125 +14,148 @@ import unittest.mock as mock
import get_llvm_hash
from get_llvm_hash import LLVMHash
+
# We grab protected stuff from get_llvm_hash. That's OK.
# pylint: disable=protected-access
def MakeMockPopen(return_code):
+ def MockPopen(*_args, **_kwargs):
+ result = mock.MagicMock()
+ result.returncode = return_code
- def MockPopen(*_args, **_kwargs):
- result = mock.MagicMock()
- result.returncode = return_code
-
- communicate_result = result.communicate.return_value
- # Communicate returns stdout, stderr.
- communicate_result.__iter__.return_value = (None, 'some stderr')
- return result
+ communicate_result = result.communicate.return_value
+ # Communicate returns stdout, stderr.
+ communicate_result.__iter__.return_value = (None, "some stderr")
+ return result
- return MockPopen
+ return MockPopen
class TestGetLLVMHash(unittest.TestCase):
- """The LLVMHash test class."""
-
- @mock.patch.object(subprocess, 'Popen')
- def testCloneRepoSucceedsWhenGitSucceeds(self, popen_mock):
- popen_mock.side_effect = MakeMockPopen(return_code=0)
- llvm_hash = LLVMHash()
-
- into_tempdir = '/tmp/tmpTest'
- llvm_hash.CloneLLVMRepo(into_tempdir)
- popen_mock.assert_called_with(
- ['git', 'clone', get_llvm_hash._LLVM_GIT_URL, into_tempdir],
- stderr=subprocess.PIPE)
-
- @mock.patch.object(subprocess, 'Popen')
- def testCloneRepoFailsWhenGitFails(self, popen_mock):
- popen_mock.side_effect = MakeMockPopen(return_code=1)
-
- with self.assertRaises(ValueError) as err:
- LLVMHash().CloneLLVMRepo('/tmp/tmp1')
-
- self.assertIn('Failed to clone', str(err.exception.args))
- self.assertIn('some stderr', str(err.exception.args))
-
- @mock.patch.object(get_llvm_hash, 'GetGitHashFrom')
- def testGetGitHashWorks(self, mock_get_git_hash):
- mock_get_git_hash.return_value = 'a13testhash2'
-
- self.assertEqual(
- get_llvm_hash.GetGitHashFrom('/tmp/tmpTest', 100), 'a13testhash2')
-
- mock_get_git_hash.assert_called_once()
-
- @mock.patch.object(LLVMHash, 'GetLLVMHash')
- @mock.patch.object(get_llvm_hash, 'GetGoogle3LLVMVersion')
- def testReturnGoogle3LLVMHash(self, mock_google3_llvm_version,
- mock_get_llvm_hash):
- mock_get_llvm_hash.return_value = 'a13testhash3'
- mock_google3_llvm_version.return_value = 1000
- self.assertEqual(LLVMHash().GetGoogle3LLVMHash(), 'a13testhash3')
- mock_get_llvm_hash.assert_called_once_with(1000)
-
- @mock.patch.object(LLVMHash, 'GetLLVMHash')
- @mock.patch.object(get_llvm_hash, 'GetGoogle3LLVMVersion')
- def testReturnGoogle3UnstableLLVMHash(self, mock_google3_llvm_version,
- mock_get_llvm_hash):
- mock_get_llvm_hash.return_value = 'a13testhash3'
- mock_google3_llvm_version.return_value = 1000
- self.assertEqual(LLVMHash().GetGoogle3UnstableLLVMHash(), 'a13testhash3')
- mock_get_llvm_hash.assert_called_once_with(1000)
-
- @mock.patch.object(subprocess, 'check_output')
- def testSuccessfullyGetGitHashFromToTOfLLVM(self, mock_check_output):
- mock_check_output.return_value = 'a123testhash1 path/to/main\n'
- self.assertEqual(LLVMHash().GetTopOfTrunkGitHash(), 'a123testhash1')
- mock_check_output.assert_called_once()
-
- @mock.patch.object(subprocess, 'Popen')
- def testCheckoutBranch(self, mock_popen):
- mock_popen.return_value = mock.MagicMock(
- communicate=lambda: (None, None), returncode=0)
- get_llvm_hash.CheckoutBranch('fake/src_dir', 'fake_branch')
- self.assertEqual(
- mock_popen.call_args_list[0][0],
- (['git', '-C', 'fake/src_dir', 'checkout', 'fake_branch'],))
- self.assertEqual(mock_popen.call_args_list[1][0],
- (['git', '-C', 'fake/src_dir', 'pull'],))
-
- def testParseLLVMMajorVersion(self):
- cmakelist_42 = ('set(CMAKE_BUILD_WITH_INSTALL_NAME_DIR ON)\n'
- 'if(NOT DEFINED LLVM_VERSION_MAJOR)\n'
- ' set(LLVM_VERSION_MAJOR 42)\n'
- 'endif()')
- self.assertEqual(get_llvm_hash.ParseLLVMMajorVersion(cmakelist_42), '42')
-
- def testParseLLVMMajorVersionInvalid(self):
- invalid_cmakelist = 'invalid cmakelist.txt contents'
- with self.assertRaises(ValueError):
- get_llvm_hash.ParseLLVMMajorVersion(invalid_cmakelist)
-
- @mock.patch.object(get_llvm_hash, 'GetAndUpdateLLVMProjectInLLVMTools')
- @mock.patch.object(get_llvm_hash, 'ParseLLVMMajorVersion')
- @mock.patch.object(get_llvm_hash, 'CheckCommand')
- @mock.patch.object(get_llvm_hash, 'CheckoutBranch')
- @mock.patch(
- 'get_llvm_hash.open',
- mock.mock_open(read_data='mock contents'),
- create=True)
- def testGetLLVMMajorVersion(self, mock_checkout_branch, mock_git_checkout,
- mock_major_version, mock_llvm_project_path):
- mock_llvm_project_path.return_value = 'path/to/llvm-project'
- mock_major_version.return_value = '1234'
- self.assertEqual(get_llvm_hash.GetLLVMMajorVersion('314159265'), '1234')
- # Second call should be memoized
- self.assertEqual(get_llvm_hash.GetLLVMMajorVersion('314159265'), '1234')
- mock_llvm_project_path.assert_called_once()
- mock_major_version.assert_called_with('mock contents')
- mock_git_checkout.assert_called_once_with(
- ['git', '-C', 'path/to/llvm-project', 'checkout', '314159265'])
- mock_checkout_branch.assert_called_once_with('path/to/llvm-project', 'main')
-
-
-if __name__ == '__main__':
- unittest.main()
+ """The LLVMHash test class."""
+
+ @mock.patch.object(subprocess, "Popen")
+ def testCloneRepoSucceedsWhenGitSucceeds(self, popen_mock):
+ popen_mock.side_effect = MakeMockPopen(return_code=0)
+ llvm_hash = LLVMHash()
+
+ into_tempdir = "/tmp/tmpTest"
+ llvm_hash.CloneLLVMRepo(into_tempdir)
+ popen_mock.assert_called_with(
+ ["git", "clone", get_llvm_hash._LLVM_GIT_URL, into_tempdir],
+ stderr=subprocess.PIPE,
+ )
+
+ @mock.patch.object(subprocess, "Popen")
+ def testCloneRepoFailsWhenGitFails(self, popen_mock):
+ popen_mock.side_effect = MakeMockPopen(return_code=1)
+
+ with self.assertRaises(ValueError) as err:
+ LLVMHash().CloneLLVMRepo("/tmp/tmp1")
+
+ self.assertIn("Failed to clone", str(err.exception.args))
+ self.assertIn("some stderr", str(err.exception.args))
+
+ @mock.patch.object(get_llvm_hash, "GetGitHashFrom")
+ def testGetGitHashWorks(self, mock_get_git_hash):
+ mock_get_git_hash.return_value = "a13testhash2"
+
+ self.assertEqual(
+ get_llvm_hash.GetGitHashFrom("/tmp/tmpTest", 100), "a13testhash2"
+ )
+
+ mock_get_git_hash.assert_called_once()
+
+ @mock.patch.object(LLVMHash, "GetLLVMHash")
+ @mock.patch.object(get_llvm_hash, "GetGoogle3LLVMVersion")
+ def testReturnGoogle3LLVMHash(
+ self, mock_google3_llvm_version, mock_get_llvm_hash
+ ):
+ mock_get_llvm_hash.return_value = "a13testhash3"
+ mock_google3_llvm_version.return_value = 1000
+ self.assertEqual(LLVMHash().GetGoogle3LLVMHash(), "a13testhash3")
+ mock_get_llvm_hash.assert_called_once_with(1000)
+
+ @mock.patch.object(LLVMHash, "GetLLVMHash")
+ @mock.patch.object(get_llvm_hash, "GetGoogle3LLVMVersion")
+ def testReturnGoogle3UnstableLLVMHash(
+ self, mock_google3_llvm_version, mock_get_llvm_hash
+ ):
+ mock_get_llvm_hash.return_value = "a13testhash3"
+ mock_google3_llvm_version.return_value = 1000
+ self.assertEqual(
+ LLVMHash().GetGoogle3UnstableLLVMHash(), "a13testhash3"
+ )
+ mock_get_llvm_hash.assert_called_once_with(1000)
+
+ @mock.patch.object(subprocess, "check_output")
+ def testSuccessfullyGetGitHashFromToTOfLLVM(self, mock_check_output):
+ mock_check_output.return_value = "a123testhash1 path/to/main\n"
+ self.assertEqual(LLVMHash().GetTopOfTrunkGitHash(), "a123testhash1")
+ mock_check_output.assert_called_once()
+
+ @mock.patch.object(subprocess, "Popen")
+ def testCheckoutBranch(self, mock_popen):
+ mock_popen.return_value = mock.MagicMock(
+ communicate=lambda: (None, None), returncode=0
+ )
+ get_llvm_hash.CheckoutBranch("fake/src_dir", "fake_branch")
+ self.assertEqual(
+ mock_popen.call_args_list[0][0],
+ (["git", "-C", "fake/src_dir", "checkout", "fake_branch"],),
+ )
+ self.assertEqual(
+ mock_popen.call_args_list[1][0],
+ (["git", "-C", "fake/src_dir", "pull"],),
+ )
+
+ def testParseLLVMMajorVersion(self):
+ cmakelist_42 = (
+ "set(CMAKE_BUILD_WITH_INSTALL_NAME_DIR ON)\n"
+ "if(NOT DEFINED LLVM_VERSION_MAJOR)\n"
+ " set(LLVM_VERSION_MAJOR 42)\n"
+ "endif()"
+ )
+ self.assertEqual(
+ get_llvm_hash.ParseLLVMMajorVersion(cmakelist_42), "42"
+ )
+
+ def testParseLLVMMajorVersionInvalid(self):
+ invalid_cmakelist = "invalid cmakelist.txt contents"
+ with self.assertRaises(ValueError):
+ get_llvm_hash.ParseLLVMMajorVersion(invalid_cmakelist)
+
+ @mock.patch.object(get_llvm_hash, "GetAndUpdateLLVMProjectInLLVMTools")
+ @mock.patch.object(get_llvm_hash, "ParseLLVMMajorVersion")
+ @mock.patch.object(get_llvm_hash, "CheckCommand")
+ @mock.patch.object(get_llvm_hash, "CheckoutBranch")
+ @mock.patch(
+ "get_llvm_hash.open",
+ mock.mock_open(read_data="mock contents"),
+ create=True,
+ )
+ def testGetLLVMMajorVersion(
+ self,
+ mock_checkout_branch,
+ mock_git_checkout,
+ mock_major_version,
+ mock_llvm_project_path,
+ ):
+ mock_llvm_project_path.return_value = "path/to/llvm-project"
+ mock_major_version.return_value = "1234"
+ self.assertEqual(get_llvm_hash.GetLLVMMajorVersion("314159265"), "1234")
+ # Second call should be memoized
+ self.assertEqual(get_llvm_hash.GetLLVMMajorVersion("314159265"), "1234")
+ mock_llvm_project_path.assert_called_once()
+ mock_major_version.assert_called_with("mock contents")
+ mock_git_checkout.assert_called_once_with(
+ ["git", "-C", "path/to/llvm-project", "checkout", "314159265"]
+ )
+ mock_checkout_branch.assert_called_once_with(
+ "path/to/llvm-project", "main"
+ )
+
+
+if __name__ == "__main__":
+ unittest.main()
diff --git a/llvm_tools/get_upstream_patch.py b/llvm_tools/get_upstream_patch.py
index 5669b023..72aa16b6 100755
--- a/llvm_tools/get_upstream_patch.py
+++ b/llvm_tools/get_upstream_patch.py
@@ -1,29 +1,31 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
-# Copyright 2020 The Chromium OS Authors. All rights reserved.
+# Copyright 2020 The ChromiumOS Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Get an upstream patch to LLVM's PATCHES.json."""
import argparse
+import dataclasses
+from datetime import datetime
import json
import logging
import os
+from pathlib import Path
import shlex
import subprocess
import sys
import typing as t
-from datetime import datetime
-
-import dataclasses
import chroot
import get_llvm_hash
import git
import git_llvm_rev
+import patch_utils
import update_chromeos_llvm_hash
+
__DOC_EPILOGUE = """
Example Usage:
get_upstream_patch --chroot_path ~/chromiumos --platform chromiumos \
@@ -32,434 +34,567 @@ Example Usage:
class CherrypickError(ValueError):
- """A ValueError that highlights the cherry-pick has been seen before"""
-
-
-def add_patch(patches_json_path: str, patches_dir: str,
- relative_patches_dir: str, start_version: git_llvm_rev.Rev,
- llvm_dir: str, rev: t.Union[git_llvm_rev.Rev, str], sha: str,
- package: str, platforms: t.List[str]):
- """Gets the start and end intervals in 'json_file'.
-
- Args:
- patches_json_path: The absolute path to PATCHES.json.
- patches_dir: The aboslute path to the directory patches are in.
- relative_patches_dir: The relative path to PATCHES.json.
- start_version: The base LLVM revision this patch applies to.
- llvm_dir: The path to LLVM checkout.
- rev: An LLVM revision (git_llvm_rev.Rev) for a cherrypicking, or a
- differential revision (str) otherwise.
- sha: The LLVM git sha that corresponds to the patch. For differential
- revisions, the git sha from the local commit created by 'arc patch'
- is used.
- package: The LLVM project name this patch applies to.
- platforms: List of platforms this patch applies to.
-
- Raises:
- CherrypickError: A ValueError that highlights the cherry-pick has been
- seen before.
- """
-
- with open(patches_json_path, encoding='utf-8') as f:
- patches_json = json.load(f)
-
- is_cherrypick = isinstance(rev, git_llvm_rev.Rev)
- if is_cherrypick:
- file_name = f'{sha}.patch'
- else:
- file_name = f'{rev}.patch'
- rel_patch_path = os.path.join(relative_patches_dir, file_name)
-
- for p in patches_json:
- rel_path = p['rel_patch_path']
- if rel_path == rel_patch_path:
- raise CherrypickError(
- f'Patch at {rel_path} already exists in PATCHES.json')
+ """A ValueError that highlights the cherry-pick has been seen before"""
+
+
+class CherrypickVersionError(ValueError):
+ """A ValueError that highlights the cherry-pick is before the start_sha"""
+
+
+class PatchApplicationError(ValueError):
+ """A ValueError indicating that a test patch application was unsuccessful"""
+
+
+def validate_patch_application(
+ llvm_dir: Path, svn_version: int, patches_json_fp: Path, patch_props
+):
+
+ start_sha = get_llvm_hash.GetGitHashFrom(llvm_dir, svn_version)
+ subprocess.run(["git", "-C", llvm_dir, "checkout", start_sha], check=True)
+
+ predecessor_apply_results = patch_utils.apply_all_from_json(
+ svn_version, llvm_dir, patches_json_fp, continue_on_failure=True
+ )
+
+ if predecessor_apply_results.failed_patches:
+ logging.error("Failed to apply patches from PATCHES.json:")
+ for p in predecessor_apply_results.failed_patches:
+ logging.error(f"Patch title: {p.title()}")
+ raise PatchApplicationError("Failed to apply patch from PATCHES.json")
+
+ patch_entry = patch_utils.PatchEntry.from_dict(
+ patches_json_fp.parent, patch_props
+ )
+ test_apply_result = patch_entry.test_apply(Path(llvm_dir))
+
+ if not test_apply_result:
+ logging.error("Could not apply requested patch")
+ logging.error(test_apply_result.failure_info())
+ raise PatchApplicationError(
+ f'Failed to apply patch: {patch_props["metadata"]["title"]}'
+ )
+
+
+def add_patch(
+ patches_json_path: str,
+ patches_dir: str,
+ relative_patches_dir: str,
+ start_version: git_llvm_rev.Rev,
+ llvm_dir: str,
+ rev: t.Union[git_llvm_rev.Rev, str],
+ sha: str,
+ package: str,
+ platforms: t.List[str],
+):
+ """Gets the start and end intervals in 'json_file'.
+
+ Args:
+ patches_json_path: The absolute path to PATCHES.json.
+        patches_dir: The absolute path to the directory patches are in.
+ relative_patches_dir: The relative path to PATCHES.json.
+ start_version: The base LLVM revision this patch applies to.
+ llvm_dir: The path to LLVM checkout.
+        rev: An LLVM revision (git_llvm_rev.Rev) for a cherry-pick, or a
+ differential revision (str) otherwise.
+ sha: The LLVM git sha that corresponds to the patch. For differential
+ revisions, the git sha from the local commit created by 'arc patch'
+ is used.
+ package: The LLVM project name this patch applies to.
+ platforms: List of platforms this patch applies to.
+
+ Raises:
+        CherrypickError: A ValueError raised when the cherry-pick has been
+            seen before.
+        CherrypickVersionError: A ValueError raised when the given patch
+            is from before the start_sha.
+        PatchApplicationError: A ValueError raised when the patch does not
+            apply on top of the existing patches in PATCHES.json.
+    """
+
+ is_cherrypick = isinstance(rev, git_llvm_rev.Rev)
if is_cherrypick:
- if sha in rel_path:
- logging.warning(
- 'Similarly-named patch already exists in PATCHES.json: %r',
- rel_path)
-
- with open(os.path.join(patches_dir, file_name), 'wb') as f:
- cmd = ['git', 'show', sha]
- # Only apply the part of the patch that belongs to this package, expect
- # LLVM. This is because some packages are built with LLVM ebuild on X86 but
- # not on the other architectures. e.g. compiler-rt. Therefore always apply
- # the entire patch to LLVM ebuild as a workaround.
- if package != 'llvm':
- cmd.append(package_to_project(package))
- subprocess.check_call(cmd, stdout=f, cwd=llvm_dir)
-
- commit_subject = subprocess.check_output(
- ['git', 'log', '-n1', '--format=%s', sha],
- cwd=llvm_dir,
- encoding='utf-8')
-
- end_vers = rev.number if isinstance(rev, git_llvm_rev.Rev) else None
- patch_props = {
- 'rel_patch_path': rel_patch_path,
- 'metadata': {
- 'title': commit_subject.strip(),
- 'info': [],
- },
- 'platforms': sorted(platforms),
- 'version_range': {
- 'from': start_version.number,
- 'until': end_vers,
- },
- }
- patches_json.append(patch_props)
-
- temp_file = patches_json_path + '.tmp'
- with open(temp_file, 'w', encoding='utf-8') as f:
- json.dump(patches_json,
- f,
- indent=4,
- separators=(',', ': '),
- sort_keys=True)
- f.write('\n')
- os.rename(temp_file, patches_json_path)
+ file_name = f"{sha}.patch"
+ else:
+ file_name = f"{rev}.patch"
+ rel_patch_path = os.path.join(relative_patches_dir, file_name)
+
+ # Check that we haven't grabbed a patch range that's nonsensical.
+ end_vers = rev.number if isinstance(rev, git_llvm_rev.Rev) else None
+ if end_vers is not None and end_vers <= start_version.number:
+ raise CherrypickVersionError(
+ f"`until` version {end_vers} is earlier or equal to"
+ f" `from` version {start_version.number} for patch"
+ f" {rel_patch_path}"
+ )
+
+ with open(patches_json_path, encoding="utf-8") as f:
+ patches_json = json.load(f)
+
+ for p in patches_json:
+ rel_path = p["rel_patch_path"]
+ if rel_path == rel_patch_path:
+ raise CherrypickError(
+ f"Patch at {rel_path} already exists in PATCHES.json"
+ )
+ if is_cherrypick:
+ if sha in rel_path:
+ logging.warning(
+ "Similarly-named patch already exists in PATCHES.json: %r",
+ rel_path,
+ )
+
+ with open(os.path.join(patches_dir, file_name), "wb") as f:
+ cmd = ["git", "show", sha]
+        # Only apply the part of the patch that belongs to this package,
+        # except for LLVM. This is because some packages (e.g. compiler-rt)
+        # are built with the LLVM ebuild on x86 but not on the other
+        # architectures, so always apply the entire patch to the LLVM ebuild
+        # as a workaround.
+ if package != "llvm":
+ cmd.append(package_to_project(package))
+ subprocess.check_call(cmd, stdout=f, cwd=llvm_dir)
+
+ commit_subject = subprocess.check_output(
+ ["git", "log", "-n1", "--format=%s", sha],
+ cwd=llvm_dir,
+ encoding="utf-8",
+ )
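+    # The resulting PATCHES.json entry looks roughly like this (the values
+    # below are illustrative):
+    #     {
+    #         "rel_patch_path": "cherry/<sha>.patch",
+    #         "metadata": {"title": "<commit subject>", "info": []},
+    #         "platforms": ["chromiumos"],
+    #         "version_range": {"from": 123456, "until": 123789}
+    #     }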
+ patch_props = {
+ "rel_patch_path": rel_patch_path,
+ "metadata": {
+ "title": commit_subject.strip(),
+ "info": [],
+ },
+ "platforms": sorted(platforms),
+ "version_range": {
+ "from": start_version.number,
+ "until": end_vers,
+ },
+ }
+
+ with patch_utils.git_clean_context(Path(llvm_dir)):
+ validate_patch_application(
+ Path(llvm_dir),
+ start_version.number,
+ Path(patches_json_path),
+ patch_props,
+ )
+
+ patches_json.append(patch_props)
+
+ temp_file = patches_json_path + ".tmp"
+ with open(temp_file, "w", encoding="utf-8") as f:
+ json.dump(
+ patches_json, f, indent=4, separators=(",", ": "), sort_keys=True
+ )
+ f.write("\n")
+ os.rename(temp_file, patches_json_path)
def parse_ebuild_for_assignment(ebuild_path: str, var_name: str) -> str:
- # '_pre' filters the LLVM 9.0 ebuild, which we never want to target, from
- # this list.
- candidates = [
- x for x in os.listdir(ebuild_path)
- if x.endswith('.ebuild') and '_pre' in x
- ]
-
- if not candidates:
- raise ValueError('No ebuilds found under %r' % ebuild_path)
-
- ebuild = os.path.join(ebuild_path, max(candidates))
- with open(ebuild, encoding='utf-8') as f:
- var_name_eq = var_name + '='
- for orig_line in f:
- if not orig_line.startswith(var_name_eq):
- continue
-
- # We shouldn't see much variety here, so do the simplest thing possible.
- line = orig_line[len(var_name_eq):]
- # Remove comments
- line = line.split('#')[0]
- # Remove quotes
- line = shlex.split(line)
- if len(line) != 1:
- raise ValueError('Expected exactly one quoted value in %r' % orig_line)
- return line[0].strip()
-
- raise ValueError('No %s= line found in %r' % (var_name, ebuild))
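+    """Parses the newest '*_pre*' ebuild under ebuild_path for `var_name=`.
+
+    E.g. an ebuild line `LLVM_HASH="abc123" # pinned` (illustrative) yields
+    "abc123": the comment is stripped and the quotes are removed via shlex.
+    """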
+ # '_pre' filters the LLVM 9.0 ebuild, which we never want to target, from
+ # this list.
+ candidates = [
+ x
+ for x in os.listdir(ebuild_path)
+ if x.endswith(".ebuild") and "_pre" in x
+ ]
+
+ if not candidates:
+ raise ValueError("No ebuilds found under %r" % ebuild_path)
+
+ ebuild = os.path.join(ebuild_path, max(candidates))
+ with open(ebuild, encoding="utf-8") as f:
+ var_name_eq = var_name + "="
+ for orig_line in f:
+ if not orig_line.startswith(var_name_eq):
+ continue
+
+ # We shouldn't see much variety here, so do the simplest thing possible.
+ line = orig_line[len(var_name_eq) :]
+ # Remove comments
+ line = line.split("#")[0]
+ # Remove quotes
+ line = shlex.split(line)
+ if len(line) != 1:
+ raise ValueError(
+ "Expected exactly one quoted value in %r" % orig_line
+ )
+ return line[0].strip()
+
+ raise ValueError("No %s= line found in %r" % (var_name, ebuild))
# Resolves a git ref (or similar) to an LLVM SHA.
def resolve_llvm_ref(llvm_dir: str, sha: str) -> str:
- return subprocess.check_output(
- ['git', 'rev-parse', sha],
- encoding='utf-8',
- cwd=llvm_dir,
- ).strip()
+ return subprocess.check_output(
+ ["git", "rev-parse", sha],
+ encoding="utf-8",
+ cwd=llvm_dir,
+ ).strip()
# Get the package name of an LLVM project
def project_to_package(project: str) -> str:
- if project == 'libunwind':
- return 'llvm-libunwind'
- return project
+ if project == "libunwind":
+ return "llvm-libunwind"
+ return project
# Get the LLVM project name of a package
def package_to_project(package: str) -> str:
- if package == 'llvm-libunwind':
- return 'libunwind'
- return package
+ if package == "llvm-libunwind":
+ return "libunwind"
+ return package
# Get the LLVM projects changed in the specified sha
def get_package_names(sha: str, llvm_dir: str) -> list:
- paths = subprocess.check_output(
- ['git', 'show', '--name-only', '--format=', sha],
- cwd=llvm_dir,
- encoding='utf-8').splitlines()
- # Some LLVM projects are built by LLVM ebuild on X86, so always apply the
- # patch to LLVM ebuild
- packages = {'llvm'}
- # Detect if there are more packages to apply the patch to
- for path in paths:
- package = project_to_package(path.split('/')[0])
- if package in ('compiler-rt', 'libcxx', 'libcxxabi', 'llvm-libunwind'):
- packages.add(package)
- packages = list(sorted(packages))
- return packages
-
-
-def create_patch_for_packages(packages: t.List[str], symlinks: t.List[str],
- start_rev: git_llvm_rev.Rev,
- rev: t.Union[git_llvm_rev.Rev, str], sha: str,
- llvm_dir: str, platforms: t.List[str]):
- """Create a patch and add its metadata for each package"""
- for package, symlink in zip(packages, symlinks):
- symlink_dir = os.path.dirname(symlink)
- patches_json_path = os.path.join(symlink_dir, 'files/PATCHES.json')
- relative_patches_dir = 'cherry' if package == 'llvm' else ''
- patches_dir = os.path.join(symlink_dir, 'files', relative_patches_dir)
- logging.info('Getting %s (%s) into %s', rev, sha, package)
- add_patch(patches_json_path,
- patches_dir,
- relative_patches_dir,
- start_rev,
- llvm_dir,
- rev,
- sha,
- package,
- platforms=platforms)
-
-
-def make_cl(symlinks_to_uprev: t.List[str], llvm_symlink_dir: str, branch: str,
- commit_messages: t.List[str], reviewers: t.Optional[t.List[str]],
- cc: t.Optional[t.List[str]]):
- symlinks_to_uprev = sorted(set(symlinks_to_uprev))
- for symlink in symlinks_to_uprev:
- update_chromeos_llvm_hash.UprevEbuildSymlink(symlink)
- subprocess.check_output(['git', 'add', '--all'],
- cwd=os.path.dirname(symlink))
- git.UploadChanges(llvm_symlink_dir, branch, commit_messages, reviewers, cc)
- git.DeleteBranch(llvm_symlink_dir, branch)
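+    """Returns the names of the packages changed by `sha`.
+
+    E.g. a commit touching compiler-rt/ and clang/ yields
+    ["compiler-rt", "llvm"], since "llvm" is always included.
+    """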
+ paths = subprocess.check_output(
+ ["git", "show", "--name-only", "--format=", sha],
+ cwd=llvm_dir,
+ encoding="utf-8",
+ ).splitlines()
+    # Some LLVM projects are built by the LLVM ebuild on x86, so always apply
+    # the patch to the LLVM ebuild.
+ packages = {"llvm"}
+ # Detect if there are more packages to apply the patch to
+ for path in paths:
+ package = project_to_package(path.split("/")[0])
+ if package in ("compiler-rt", "libcxx", "libcxxabi", "llvm-libunwind"):
+ packages.add(package)
+ packages = list(sorted(packages))
+ return packages
+
+
+def create_patch_for_packages(
+ packages: t.List[str],
+ symlinks: t.List[str],
+ start_rev: git_llvm_rev.Rev,
+ rev: t.Union[git_llvm_rev.Rev, str],
+ sha: str,
+ llvm_dir: str,
+ platforms: t.List[str],
+):
+ """Create a patch and add its metadata for each package"""
+ for package, symlink in zip(packages, symlinks):
+ symlink_dir = os.path.dirname(symlink)
+ patches_json_path = os.path.join(symlink_dir, "files/PATCHES.json")
+ relative_patches_dir = "cherry" if package == "llvm" else ""
+ patches_dir = os.path.join(symlink_dir, "files", relative_patches_dir)
+ logging.info("Getting %s (%s) into %s", rev, sha, package)
+ add_patch(
+ patches_json_path,
+ patches_dir,
+ relative_patches_dir,
+ start_rev,
+ llvm_dir,
+ rev,
+ sha,
+ package,
+ platforms=platforms,
+ )
+
+
+def make_cl(
+ symlinks_to_uprev: t.List[str],
+ llvm_symlink_dir: str,
+ branch: str,
+ commit_messages: t.List[str],
+ reviewers: t.Optional[t.List[str]],
+ cc: t.Optional[t.List[str]],
+):
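+    """Uprevs the given ebuild symlinks and uploads the changes as a CL."""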
+ symlinks_to_uprev = sorted(set(symlinks_to_uprev))
+ for symlink in symlinks_to_uprev:
+ update_chromeos_llvm_hash.UprevEbuildSymlink(symlink)
+ subprocess.check_output(
+ ["git", "add", "--all"], cwd=os.path.dirname(symlink)
+ )
+ git.UploadChanges(llvm_symlink_dir, branch, commit_messages, reviewers, cc)
+ git.DeleteBranch(llvm_symlink_dir, branch)
def resolve_symbolic_sha(start_sha: str, llvm_symlink_dir: str) -> str:
- if start_sha == 'llvm':
- return parse_ebuild_for_assignment(llvm_symlink_dir, 'LLVM_HASH')
+ if start_sha == "llvm":
+ return parse_ebuild_for_assignment(llvm_symlink_dir, "LLVM_HASH")
- if start_sha == 'llvm-next':
- return parse_ebuild_for_assignment(llvm_symlink_dir, 'LLVM_NEXT_HASH')
+ if start_sha == "llvm-next":
+ return parse_ebuild_for_assignment(llvm_symlink_dir, "LLVM_NEXT_HASH")
- return start_sha
+ return start_sha
def find_patches_and_make_cl(
- chroot_path: str, patches: t.List[str], start_rev: git_llvm_rev.Rev,
- llvm_config: git_llvm_rev.LLVMConfig, llvm_symlink_dir: str,
- create_cl: bool, skip_dependencies: bool,
- reviewers: t.Optional[t.List[str]], cc: t.Optional[t.List[str]],
- platforms: t.List[str]):
-
- converted_patches = [
- _convert_patch(llvm_config, skip_dependencies, p) for p in patches
- ]
- potential_duplicates = _get_duplicate_shas(converted_patches)
- if potential_duplicates:
- err_msg = '\n'.join(f'{a.patch} == {b.patch}'
- for a, b in potential_duplicates)
- raise RuntimeError(f'Found Duplicate SHAs:\n{err_msg}')
-
- # CL Related variables, only used if `create_cl`
- symlinks_to_uprev = []
- commit_messages = [
- 'llvm: get patches from upstream\n',
- ]
- branch = f'get-upstream-{datetime.now().strftime("%Y%m%d%H%M%S%f")}'
-
- if create_cl:
- git.CreateBranch(llvm_symlink_dir, branch)
-
- for parsed_patch in converted_patches:
- # Find out the llvm projects changed in this commit
- packages = get_package_names(parsed_patch.sha, llvm_config.dir)
- # Find out the ebuild symlinks of the corresponding ChromeOS packages
- symlinks = chroot.GetChrootEbuildPaths(chroot_path, [
- 'sys-devel/llvm' if package == 'llvm' else 'sys-libs/' + package
- for package in packages
- ])
- symlinks = chroot.ConvertChrootPathsToAbsolutePaths(chroot_path, symlinks)
- # Create a local patch for all the affected llvm projects
- create_patch_for_packages(packages,
- symlinks,
- start_rev,
- parsed_patch.rev,
- parsed_patch.sha,
- llvm_config.dir,
- platforms=platforms)
- if create_cl:
- symlinks_to_uprev.extend(symlinks)
+ chroot_path: str,
+ patches: t.List[str],
+ start_rev: git_llvm_rev.Rev,
+ llvm_config: git_llvm_rev.LLVMConfig,
+ llvm_symlink_dir: str,
+ create_cl: bool,
+ skip_dependencies: bool,
+ reviewers: t.Optional[t.List[str]],
+ cc: t.Optional[t.List[str]],
+ platforms: t.List[str],
+):
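+    """Creates a local patch for each given patch referent.
+
+    If `create_cl` is set, the resulting changes are also uploaded as a CL.
+    """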
+
+ converted_patches = [
+ _convert_patch(llvm_config, skip_dependencies, p) for p in patches
+ ]
+ potential_duplicates = _get_duplicate_shas(converted_patches)
+ if potential_duplicates:
+ err_msg = "\n".join(
+ f"{a.patch} == {b.patch}" for a, b in potential_duplicates
+ )
+ raise RuntimeError(f"Found Duplicate SHAs:\n{err_msg}")
+
+ # CL Related variables, only used if `create_cl`
+ symlinks_to_uprev = []
+ commit_messages = [
+ "llvm: get patches from upstream\n",
+ ]
+ branch = f'get-upstream-{datetime.now().strftime("%Y%m%d%H%M%S%f")}'
- commit_messages.extend([
- parsed_patch.git_msg(),
- subprocess.check_output(
- ['git', 'log', '-n1', '--oneline', parsed_patch.sha],
- cwd=llvm_config.dir,
- encoding='utf-8')
- ])
-
- if parsed_patch.is_differential:
- subprocess.check_output(['git', 'reset', '--hard', 'HEAD^'],
- cwd=llvm_config.dir)
+ if create_cl:
+ git.CreateBranch(llvm_symlink_dir, branch)
+
+ for parsed_patch in converted_patches:
+ # Find out the llvm projects changed in this commit
+ packages = get_package_names(parsed_patch.sha, llvm_config.dir)
+ # Find out the ebuild symlinks of the corresponding ChromeOS packages
+ symlinks = chroot.GetChrootEbuildPaths(
+ chroot_path,
+ [
+ "sys-devel/llvm" if package == "llvm" else "sys-libs/" + package
+ for package in packages
+ ],
+ )
+ symlinks = chroot.ConvertChrootPathsToAbsolutePaths(
+ chroot_path, symlinks
+ )
+ # Create a local patch for all the affected llvm projects
+ create_patch_for_packages(
+ packages,
+ symlinks,
+ start_rev,
+ parsed_patch.rev,
+ parsed_patch.sha,
+ llvm_config.dir,
+ platforms=platforms,
+ )
+ if create_cl:
+ symlinks_to_uprev.extend(symlinks)
+
+ commit_messages.extend(
+ [
+ parsed_patch.git_msg(),
+ subprocess.check_output(
+ ["git", "log", "-n1", "--oneline", parsed_patch.sha],
+ cwd=llvm_config.dir,
+ encoding="utf-8",
+ ),
+ ]
+ )
+
+ if parsed_patch.is_differential:
+ subprocess.check_output(
+ ["git", "reset", "--hard", "HEAD^"], cwd=llvm_config.dir
+ )
- if create_cl:
- make_cl(symlinks_to_uprev, llvm_symlink_dir, branch, commit_messages,
- reviewers, cc)
+ if create_cl:
+ make_cl(
+ symlinks_to_uprev,
+ llvm_symlink_dir,
+ branch,
+ commit_messages,
+ reviewers,
+ cc,
+ )
@dataclasses.dataclass(frozen=True)
class ParsedPatch:
- """Class to keep track of bundled patch info."""
- patch: str
- sha: str
- is_differential: bool
- rev: t.Union[git_llvm_rev.Rev, str]
-
- def git_msg(self) -> str:
- if self.is_differential:
- return f'\n\nreviews.llvm.org/{self.patch}\n'
- return f'\n\nreviews.llvm.org/rG{self.sha}\n'
-
-
-def _convert_patch(llvm_config: git_llvm_rev.LLVMConfig,
- skip_dependencies: bool, patch: str) -> ParsedPatch:
- """Extract git revision info from a patch.
-
- Args:
- llvm_config: LLVM configuration object.
- skip_dependencies: Pass --skip-dependecies for to `arc`
- patch: A single patch referent string.
-
- Returns:
- A [ParsedPatch] object.
- """
-
- # git hash should only have lower-case letters
- is_differential = patch.startswith('D')
- if is_differential:
- subprocess.check_output(
- [
- 'arc', 'patch', '--nobranch',
- '--skip-dependencies' if skip_dependencies else '--revision', patch
- ],
- cwd=llvm_config.dir,
+ """Class to keep track of bundled patch info."""
+
+ patch: str
+ sha: str
+ is_differential: bool
+ rev: t.Union[git_llvm_rev.Rev, str]
+
+ def git_msg(self) -> str:
+ if self.is_differential:
+ return f"\n\nreviews.llvm.org/{self.patch}\n"
+ return f"\n\nreviews.llvm.org/rG{self.sha}\n"
+
+
+def _convert_patch(
+ llvm_config: git_llvm_rev.LLVMConfig, skip_dependencies: bool, patch: str
+) -> ParsedPatch:
+ """Extract git revision info from a patch.
+
+ Args:
+ llvm_config: LLVM configuration object.
+        skip_dependencies: Pass --skip-dependencies to `arc`.
+ patch: A single patch referent string.
+
+ Returns:
+ A [ParsedPatch] object.
+ """
+
+ # git hash should only have lower-case letters
+ is_differential = patch.startswith("D")
+ if is_differential:
+ subprocess.check_output(
+ [
+ "arc",
+ "patch",
+ "--nobranch",
+ "--skip-dependencies" if skip_dependencies else "--revision",
+ patch,
+ ],
+ cwd=llvm_config.dir,
+ )
+ sha = resolve_llvm_ref(llvm_config.dir, "HEAD")
+ rev = patch
+ else:
+ sha = resolve_llvm_ref(llvm_config.dir, patch)
+ rev = git_llvm_rev.translate_sha_to_rev(llvm_config, sha)
+ return ParsedPatch(
+ patch=patch, sha=sha, rev=rev, is_differential=is_differential
+ )
+
+
+def _get_duplicate_shas(
+ patches: t.List[ParsedPatch],
+) -> t.List[t.Tuple[ParsedPatch, ParsedPatch]]:
+ """Return a list of Patches which have duplicate SHA's"""
+ return [
+ (left, right)
+ for i, left in enumerate(patches)
+ for right in patches[i + 1 :]
+ if left.sha == right.sha
+ ]
+
+
+def get_from_upstream(
+ chroot_path: str,
+ create_cl: bool,
+ start_sha: str,
+ patches: t.List[str],
+ platforms: t.List[str],
+ skip_dependencies: bool = False,
+    reviewers: t.Optional[t.List[str]] = None,
+    cc: t.Optional[t.List[str]] = None,
+):
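+    """Fetches the given upstream patches and records them in PATCHES.json."""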
+ llvm_symlink = chroot.ConvertChrootPathsToAbsolutePaths(
+ chroot_path,
+ chroot.GetChrootEbuildPaths(chroot_path, ["sys-devel/llvm"]),
+ )[0]
+ llvm_symlink_dir = os.path.dirname(llvm_symlink)
+
+ git_status = subprocess.check_output(
+ ["git", "status", "-s"], cwd=llvm_symlink_dir, encoding="utf-8"
)
- sha = resolve_llvm_ref(llvm_config.dir, 'HEAD')
- rev = patch
- else:
- sha = resolve_llvm_ref(llvm_config.dir, patch)
- rev = git_llvm_rev.translate_sha_to_rev(llvm_config, sha)
- return ParsedPatch(patch=patch,
- sha=sha,
- rev=rev,
- is_differential=is_differential)
-
-
-def _get_duplicate_shas(patches: t.List[ParsedPatch]
- ) -> t.List[t.Tuple[ParsedPatch, ParsedPatch]]:
- """Return a list of Patches which have duplicate SHA's"""
- return [(left, right) for i, left in enumerate(patches)
- for right in patches[i + 1:] if left.sha == right.sha]
-
-
-def get_from_upstream(chroot_path: str,
- create_cl: bool,
- start_sha: str,
- patches: t.List[str],
- platforms: t.List[str],
- skip_dependencies: bool = False,
- reviewers: t.List[str] = None,
- cc: t.List[str] = None):
- llvm_symlink = chroot.ConvertChrootPathsToAbsolutePaths(
- chroot_path, chroot.GetChrootEbuildPaths(chroot_path,
- ['sys-devel/llvm']))[0]
- llvm_symlink_dir = os.path.dirname(llvm_symlink)
-
- git_status = subprocess.check_output(['git', 'status', '-s'],
- cwd=llvm_symlink_dir,
- encoding='utf-8')
-
- if git_status:
- error_path = os.path.dirname(os.path.dirname(llvm_symlink_dir))
- raise ValueError(f'Uncommited changes detected in {error_path}')
-
- start_sha = resolve_symbolic_sha(start_sha, llvm_symlink_dir)
- logging.info('Base llvm hash == %s', start_sha)
-
- llvm_config = git_llvm_rev.LLVMConfig(
- remote='origin', dir=get_llvm_hash.GetAndUpdateLLVMProjectInLLVMTools())
- start_sha = resolve_llvm_ref(llvm_config.dir, start_sha)
-
- find_patches_and_make_cl(chroot_path=chroot_path,
- patches=patches,
- platforms=platforms,
- start_rev=git_llvm_rev.translate_sha_to_rev(
- llvm_config, start_sha),
- llvm_config=llvm_config,
- llvm_symlink_dir=llvm_symlink_dir,
- create_cl=create_cl,
- skip_dependencies=skip_dependencies,
- reviewers=reviewers,
- cc=cc)
- logging.info('Complete.')
+
+ if git_status:
+ error_path = os.path.dirname(os.path.dirname(llvm_symlink_dir))
+ raise ValueError(f"Uncommited changes detected in {error_path}")
+
+ start_sha = resolve_symbolic_sha(start_sha, llvm_symlink_dir)
+ logging.info("Base llvm hash == %s", start_sha)
+
+ llvm_config = git_llvm_rev.LLVMConfig(
+ remote="origin", dir=get_llvm_hash.GetAndUpdateLLVMProjectInLLVMTools()
+ )
+ start_sha = resolve_llvm_ref(llvm_config.dir, start_sha)
+
+ find_patches_and_make_cl(
+ chroot_path=chroot_path,
+ patches=patches,
+ platforms=platforms,
+ start_rev=git_llvm_rev.translate_sha_to_rev(llvm_config, start_sha),
+ llvm_config=llvm_config,
+ llvm_symlink_dir=llvm_symlink_dir,
+ create_cl=create_cl,
+ skip_dependencies=skip_dependencies,
+ reviewers=reviewers,
+ cc=cc,
+ )
+ logging.info("Complete.")
def main():
- chroot.VerifyOutsideChroot()
- logging.basicConfig(
- format='%(asctime)s: %(levelname)s: %(filename)s:%(lineno)d: %(message)s',
- level=logging.INFO,
- )
-
- parser = argparse.ArgumentParser(
- description=__doc__,
- formatter_class=argparse.RawDescriptionHelpFormatter,
- epilog=__DOC_EPILOGUE)
- parser.add_argument('--chroot_path',
- default=os.path.join(os.path.expanduser('~'),
- 'chromiumos'),
- help='the path to the chroot (default: %(default)s)')
- parser.add_argument(
- '--start_sha',
- default='llvm-next',
- help='LLVM SHA that the patch should start applying at. You can specify '
- '"llvm" or "llvm-next", as well. Defaults to %(default)s.')
- parser.add_argument('--sha',
- action='append',
- default=[],
- help='The LLVM git SHA to cherry-pick.')
- parser.add_argument(
- '--differential',
- action='append',
- default=[],
- help='The LLVM differential revision to apply. Example: D1234')
- parser.add_argument(
- '--platform',
- action='append',
- required=True,
- help='Apply this patch to the give platform. Common options include '
- '"chromiumos" and "android". Can be specified multiple times to '
- 'apply to multiple platforms')
- parser.add_argument('--create_cl',
- action='store_true',
- help='Automatically create a CL if specified')
- parser.add_argument(
- '--skip_dependencies',
- action='store_true',
- help="Skips a LLVM differential revision's dependencies. Only valid "
- 'when --differential appears exactly once.')
- args = parser.parse_args()
-
- if not (args.sha or args.differential):
- parser.error('--sha or --differential required')
-
- if args.skip_dependencies and len(args.differential) != 1:
- parser.error("--skip_dependencies is only valid when there's exactly one "
- 'supplied differential')
-
- get_from_upstream(
- chroot_path=args.chroot_path,
- create_cl=args.create_cl,
- start_sha=args.start_sha,
- patches=args.sha + args.differential,
- skip_dependencies=args.skip_dependencies,
- platforms=args.platform,
- )
-
-
-if __name__ == '__main__':
- sys.exit(main())
+ chroot.VerifyOutsideChroot()
+ logging.basicConfig(
+ format="%(asctime)s: %(levelname)s: %(filename)s:%(lineno)d: %(message)s",
+ level=logging.INFO,
+ )
+
+ parser = argparse.ArgumentParser(
+ description=__doc__,
+ formatter_class=argparse.RawDescriptionHelpFormatter,
+ epilog=__DOC_EPILOGUE,
+ )
+ parser.add_argument(
+ "--chroot_path",
+ default=os.path.join(os.path.expanduser("~"), "chromiumos"),
+ help="the path to the chroot (default: %(default)s)",
+ )
+ parser.add_argument(
+ "--start_sha",
+ default="llvm-next",
+ help="LLVM SHA that the patch should start applying at. You can specify "
+ '"llvm" or "llvm-next", as well. Defaults to %(default)s.',
+ )
+ parser.add_argument(
+ "--sha",
+ action="append",
+ default=[],
+ help="The LLVM git SHA to cherry-pick.",
+ )
+ parser.add_argument(
+ "--differential",
+ action="append",
+ default=[],
+ help="The LLVM differential revision to apply. Example: D1234."
+ " Cannot be used for changes already merged upstream; use --sha"
+ " instead for those.",
+ )
+ parser.add_argument(
+ "--platform",
+ action="append",
+ required=True,
+ help="Apply this patch to the give platform. Common options include "
+ '"chromiumos" and "android". Can be specified multiple times to '
+ "apply to multiple platforms",
+ )
+ parser.add_argument(
+ "--create_cl",
+ action="store_true",
+ help="Automatically create a CL if specified",
+ )
+ parser.add_argument(
+ "--skip_dependencies",
+ action="store_true",
+ help="Skips a LLVM differential revision's dependencies. Only valid "
+ "when --differential appears exactly once.",
+ )
+ args = parser.parse_args()
+
+ if not (args.sha or args.differential):
+ parser.error("--sha or --differential required")
+
+ if args.skip_dependencies and len(args.differential) != 1:
+ parser.error(
+ "--skip_dependencies is only valid when there's exactly one "
+ "supplied differential"
+ )
+
+ get_from_upstream(
+ chroot_path=args.chroot_path,
+ create_cl=args.create_cl,
+ start_sha=args.start_sha,
+ patches=args.sha + args.differential,
+ skip_dependencies=args.skip_dependencies,
+ platforms=args.platform,
+ )
+
+
+if __name__ == "__main__":
+ sys.exit(main())
diff --git a/llvm_tools/git.py b/llvm_tools/git.py
index 22c7002a..0f56aa0d 100755
--- a/llvm_tools/git.py
+++ b/llvm_tools/git.py
@@ -1,12 +1,11 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
-# Copyright 2020 The Chromium OS Authors. All rights reserved.
+# Copyright 2020 The ChromiumOS Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Git helper functions."""
-from __future__ import print_function
import collections
import os
@@ -14,122 +13,126 @@ import re
import subprocess
import tempfile
-CommitContents = collections.namedtuple('CommitContents', ['url', 'cl_number'])
+
+CommitContents = collections.namedtuple("CommitContents", ["url", "cl_number"])
def InChroot():
- """Returns True if currently in the chroot."""
- return 'CROS_WORKON_SRCROOT' in os.environ
+ """Returns True if currently in the chroot."""
+ return "CROS_WORKON_SRCROOT" in os.environ
def VerifyOutsideChroot():
- """Checks whether the script invoked was executed in the chroot.
+ """Checks whether the script invoked was executed in the chroot.
- Raises:
- AssertionError: The script was run inside the chroot.
- """
+ Raises:
+ AssertionError: The script was run inside the chroot.
+ """
- assert not InChroot(), 'Script should be run outside the chroot.'
+ assert not InChroot(), "Script should be run outside the chroot."
def CreateBranch(repo, branch):
- """Creates a branch in the given repo.
+ """Creates a branch in the given repo.
- Args:
- repo: The absolute path to the repo.
- branch: The name of the branch to create.
+ Args:
+ repo: The absolute path to the repo.
+ branch: The name of the branch to create.
- Raises:
- ValueError: Failed to create a repo in that directory.
- """
+ Raises:
+ ValueError: Failed to create a repo in that directory.
+ """
- if not os.path.isdir(repo):
- raise ValueError('Invalid directory path provided: %s' % repo)
+ if not os.path.isdir(repo):
+ raise ValueError("Invalid directory path provided: %s" % repo)
- subprocess.check_output(['git', '-C', repo, 'reset', 'HEAD', '--hard'])
+ subprocess.check_output(["git", "-C", repo, "reset", "HEAD", "--hard"])
- subprocess.check_output(['repo', 'start', branch], cwd=repo)
+ subprocess.check_output(["repo", "start", branch], cwd=repo)
def DeleteBranch(repo, branch):
- """Deletes a branch in the given repo.
+ """Deletes a branch in the given repo.
- Args:
- repo: The absolute path of the repo.
- branch: The name of the branch to delete.
+ Args:
+ repo: The absolute path of the repo.
+ branch: The name of the branch to delete.
- Raises:
- ValueError: Failed to delete the repo in that directory.
- """
+ Raises:
+ ValueError: Failed to delete the repo in that directory.
+ """
- if not os.path.isdir(repo):
- raise ValueError('Invalid directory path provided: %s' % repo)
+ if not os.path.isdir(repo):
+ raise ValueError("Invalid directory path provided: %s" % repo)
- subprocess.check_output(['git', '-C', repo, 'checkout', 'cros/main'])
+ subprocess.check_output(["git", "-C", repo, "checkout", "cros/main"])
- subprocess.check_output(['git', '-C', repo, 'reset', 'HEAD', '--hard'])
+ subprocess.check_output(["git", "-C", repo, "reset", "HEAD", "--hard"])
- subprocess.check_output(['git', '-C', repo, 'branch', '-D', branch])
+ subprocess.check_output(["git", "-C", repo, "branch", "-D", branch])
def UploadChanges(repo, branch, commit_messages, reviewers=None, cc=None):
- """Uploads the changes in the specifed branch of the given repo for review.
-
- Args:
- repo: The absolute path to the repo where changes were made.
- branch: The name of the branch to upload.
- commit_messages: A string of commit message(s) (i.e. '[message]'
- of the changes made.
- reviewers: A list of reviewers to add to the CL.
- cc: A list of contributors to CC about the CL.
-
- Returns:
- A nametuple that has two (key, value) pairs, where the first pair is the
- Gerrit commit URL and the second pair is the change list number.
-
- Raises:
- ValueError: Failed to create a commit or failed to upload the
- changes for review.
- """
-
- if not os.path.isdir(repo):
- raise ValueError('Invalid path provided: %s' % repo)
-
- # Create a git commit.
- with tempfile.NamedTemporaryFile(mode='w+t') as f:
- f.write('\n'.join(commit_messages))
- f.flush()
-
- subprocess.check_output(['git', 'commit', '-F', f.name], cwd=repo)
-
- # Upload the changes for review.
- git_args = [
- 'repo',
- 'upload',
- '--yes',
- f'--reviewers={",".join(reviewers)}' if reviewers else '--ne',
- '--no-verify',
- f'--br={branch}',
- ]
-
- if cc:
- git_args.append(f'--cc={",".join(cc)}')
-
- out = subprocess.check_output(
- git_args,
- stderr=subprocess.STDOUT,
- cwd=repo,
- encoding='utf-8',
- )
-
- print(out)
-
- found_url = re.search(
- r'https://chromium-review.googlesource.com/c/'
- r'chromiumos/overlays/chromiumos-overlay/\+/([0-9]+)', out.rstrip())
-
- if not found_url:
- raise ValueError('Failed to find change list URL.')
-
- return CommitContents(
- url=found_url.group(0), cl_number=int(found_url.group(1)))
+ """Uploads the changes in the specifed branch of the given repo for review.
+
+ Args:
+ repo: The absolute path to the repo where changes were made.
+ branch: The name of the branch to upload.
+        commit_messages: A list of commit message string(s) (i.e. '[message]')
+            of the changes made.
+ reviewers: A list of reviewers to add to the CL.
+ cc: A list of contributors to CC about the CL.
+
+ Returns:
+        A namedtuple that has two (key, value) pairs, where the first pair is
+        the Gerrit commit URL and the second pair is the change list number.
+
+ Raises:
+ ValueError: Failed to create a commit or failed to upload the
+ changes for review.
+ """
+
+ if not os.path.isdir(repo):
+ raise ValueError("Invalid path provided: %s" % repo)
+
+ # Create a git commit.
+ with tempfile.NamedTemporaryFile(mode="w+t") as f:
+ f.write("\n".join(commit_messages))
+ f.flush()
+
+ subprocess.check_output(["git", "commit", "-F", f.name], cwd=repo)
+
+ # Upload the changes for review.
+ git_args = [
+ "repo",
+ "upload",
+ "--yes",
+ f'--reviewers={",".join(reviewers)}' if reviewers else "--ne",
+ "--no-verify",
+ f"--br={branch}",
+ ]
+
+ if cc:
+ git_args.append(f'--cc={",".join(cc)}')
+
+ out = subprocess.check_output(
+ git_args,
+ stderr=subprocess.STDOUT,
+ cwd=repo,
+ encoding="utf-8",
+ )
+
+ print(out)
+
+ found_url = re.search(
+ r"https://chromium-review.googlesource.com/c/"
+ r"chromiumos/overlays/chromiumos-overlay/\+/([0-9]+)",
+ out.rstrip(),
+ )
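+    # E.g. an output line ending in ".../chromiumos-overlay/+/123456" (an
+    # illustrative CL number) yields CommitContents(url=<full URL>,
+    # cl_number=123456).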
+
+ if not found_url:
+ raise ValueError("Failed to find change list URL.")
+
+ return CommitContents(
+ url=found_url.group(0), cl_number=int(found_url.group(1))
+ )
diff --git a/llvm_tools/git_llvm_rev.py b/llvm_tools/git_llvm_rev.py
index b62b26e2..3dc34fce 100755
--- a/llvm_tools/git_llvm_rev.py
+++ b/llvm_tools/git_llvm_rev.py
@@ -1,6 +1,6 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
-# Copyright 2019 The Chromium OS Authors. All rights reserved.
+# Copyright 2019 The ChromiumOS Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
@@ -10,7 +10,6 @@ Revision numbers are all of the form '(branch_name, r1234)'. As a shorthand,
r1234 is parsed as '(main, 1234)'.
"""
-from __future__ import print_function
import argparse
import re
@@ -18,7 +17,8 @@ import subprocess
import sys
import typing as t
-MAIN_BRANCH = 'main'
+
+MAIN_BRANCH = "main"
# Note that after base_llvm_sha, we reach The Wild West(TM) of commits.
# So reasonable input that could break us includes:
@@ -33,350 +33,375 @@ MAIN_BRANCH = 'main'
# While saddening, this is something we should probably try to handle
# reasonably.
base_llvm_revision = 375505
-base_llvm_sha = '186155b89c2d2a2f62337081e3ca15f676c9434b'
+base_llvm_sha = "186155b89c2d2a2f62337081e3ca15f676c9434b"
# Represents an LLVM git checkout:
# - |dir| is the directory of the LLVM checkout
# - |remote| is the name of the LLVM remote. Generally it's "origin".
-LLVMConfig = t.NamedTuple('LLVMConfig', (('remote', str), ('dir', str)))
+LLVMConfig = t.NamedTuple("LLVMConfig", (("remote", str), ("dir", str)))
-class Rev(t.NamedTuple('Rev', (('branch', str), ('number', int)))):
- """Represents a LLVM 'revision', a shorthand identifies a LLVM commit."""
+class Rev(t.NamedTuple("Rev", (("branch", str), ("number", int)))):
+ """Represents a LLVM 'revision', a shorthand identifies a LLVM commit."""
- @staticmethod
- def parse(rev: str) -> 'Rev':
- """Parses a Rev from the given string.
+ @staticmethod
+ def parse(rev: str) -> "Rev":
+ """Parses a Rev from the given string.
- Raises a ValueError on a failed parse.
- """
- # Revs are parsed into (${branch_name}, r${commits_since_base_commit})
- # pairs.
- #
- # We support r${commits_since_base_commit} as shorthand for
- # (main, r${commits_since_base_commit}).
- if rev.startswith('r'):
- branch_name = MAIN_BRANCH
- rev_string = rev[1:]
- else:
- match = re.match(r'\((.+), r(\d+)\)', rev)
- if not match:
- raise ValueError("%r isn't a valid revision" % rev)
+ Raises a ValueError on a failed parse.
+ """
+ # Revs are parsed into (${branch_name}, r${commits_since_base_commit})
+ # pairs.
+ #
+ # We support r${commits_since_base_commit} as shorthand for
+ # (main, r${commits_since_base_commit}).
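+        # For example (an illustrative sketch):
+        #   Rev.parse("r1234") == Rev(branch="main", number=1234)
+        #   Rev.parse("(foo, r1234)") == Rev(branch="foo", number=1234)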
+ if rev.startswith("r"):
+ branch_name = MAIN_BRANCH
+ rev_string = rev[1:]
+ else:
+ match = re.match(r"\((.+), r(\d+)\)", rev)
+ if not match:
+ raise ValueError("%r isn't a valid revision" % rev)
- branch_name, rev_string = match.groups()
+ branch_name, rev_string = match.groups()
- return Rev(branch=branch_name, number=int(rev_string))
+ return Rev(branch=branch_name, number=int(rev_string))
- def __str__(self) -> str:
- branch_name, number = self
- if branch_name == MAIN_BRANCH:
- return 'r%d' % number
- return '(%s, r%d)' % (branch_name, number)
+ def __str__(self) -> str:
+ branch_name, number = self
+ if branch_name == MAIN_BRANCH:
+ return "r%d" % number
+ return "(%s, r%d)" % (branch_name, number)
def is_git_sha(xs: str) -> bool:
- """Returns whether the given string looks like a valid git commit SHA."""
- return len(xs) > 6 and len(xs) <= 40 and all(
- x.isdigit() or 'a' <= x.lower() <= 'f' for x in xs)
+ """Returns whether the given string looks like a valid git commit SHA."""
+ return (
+ len(xs) > 6
+ and len(xs) <= 40
+ and all(x.isdigit() or "a" <= x.lower() <= "f" for x in xs)
+ )
def check_output(command: t.List[str], cwd: str) -> str:
- """Shorthand for subprocess.check_output. Auto-decodes any stdout."""
- result = subprocess.run(
- command,
- cwd=cwd,
- check=True,
- stdin=subprocess.DEVNULL,
- stdout=subprocess.PIPE,
- encoding='utf-8',
- )
- return result.stdout
-
-
-def translate_prebase_sha_to_rev_number(llvm_config: LLVMConfig,
- sha: str) -> int:
- """Translates a sha to a revision number (e.g., "llvm-svn: 1234").
-
- This function assumes that the given SHA is an ancestor of |base_llvm_sha|.
- """
- commit_message = check_output(
- ['git', 'log', '-n1', '--format=%B', sha],
- cwd=llvm_config.dir,
- )
- last_line = commit_message.strip().splitlines()[-1]
- svn_match = re.match(r'^llvm-svn: (\d+)$', last_line)
-
- if not svn_match:
- raise ValueError(
- f"No llvm-svn line found for {sha}, which... shouldn't happen?")
-
- return int(svn_match.group(1))
+ """Shorthand for subprocess.check_output. Auto-decodes any stdout."""
+ result = subprocess.run(
+ command,
+ cwd=cwd,
+ check=True,
+ stdin=subprocess.DEVNULL,
+ stdout=subprocess.PIPE,
+ encoding="utf-8",
+ )
+ return result.stdout
-def translate_sha_to_rev(llvm_config: LLVMConfig, sha_or_ref: str) -> Rev:
- """Translates a sha or git ref to a Rev."""
+def translate_prebase_sha_to_rev_number(
+ llvm_config: LLVMConfig, sha: str
+) -> int:
+ """Translates a sha to a revision number (e.g., "llvm-svn: 1234").
- if is_git_sha(sha_or_ref):
- sha = sha_or_ref
- else:
- sha = check_output(
- ['git', 'rev-parse', sha_or_ref],
+ This function assumes that the given SHA is an ancestor of |base_llvm_sha|.
+ """
+ commit_message = check_output(
+ ["git", "log", "-n1", "--format=%B", sha],
cwd=llvm_config.dir,
)
- sha = sha.strip()
+ last_line = commit_message.strip().splitlines()[-1]
+ svn_match = re.match(r"^llvm-svn: (\d+)$", last_line)
- merge_base = check_output(
- ['git', 'merge-base', base_llvm_sha, sha],
- cwd=llvm_config.dir,
- )
- merge_base = merge_base.strip()
+ if not svn_match:
+ raise ValueError(
+ f"No llvm-svn line found for {sha}, which... shouldn't happen?"
+ )
- if merge_base == base_llvm_sha:
- result = check_output(
+ return int(svn_match.group(1))
+
+
+def translate_sha_to_rev(llvm_config: LLVMConfig, sha_or_ref: str) -> Rev:
+ """Translates a sha or git ref to a Rev."""
+
+ if is_git_sha(sha_or_ref):
+ sha = sha_or_ref
+ else:
+ sha = check_output(
+ ["git", "rev-parse", sha_or_ref],
+ cwd=llvm_config.dir,
+ )
+ sha = sha.strip()
+
+ merge_base = check_output(
+ ["git", "merge-base", base_llvm_sha, sha],
+ cwd=llvm_config.dir,
+ )
+ merge_base = merge_base.strip()
+
+ if merge_base == base_llvm_sha:
+ result = check_output(
+ [
+ "git",
+ "rev-list",
+ "--count",
+ "--first-parent",
+ f"{base_llvm_sha}..{sha}",
+ ],
+ cwd=llvm_config.dir,
+ )
+ count = int(result.strip())
+ return Rev(branch=MAIN_BRANCH, number=count + base_llvm_revision)
+
+ # Otherwise, either:
+ # - |merge_base| is |sha| (we have a guaranteed llvm-svn number on |sha|)
+ # - |merge_base| is neither (we have a guaranteed llvm-svn number on
+ # |merge_base|, but not |sha|)
+ merge_base_number = translate_prebase_sha_to_rev_number(
+ llvm_config, merge_base
+ )
+ if merge_base == sha:
+ return Rev(branch=MAIN_BRANCH, number=merge_base_number)
+
+ distance_from_base = check_output(
[
- 'git',
- 'rev-list',
- '--count',
- '--first-parent',
- f'{base_llvm_sha}..{sha}',
+ "git",
+ "rev-list",
+ "--count",
+ "--first-parent",
+ f"{merge_base}..{sha}",
],
cwd=llvm_config.dir,
)
- count = int(result.strip())
- return Rev(branch=MAIN_BRANCH, number=count + base_llvm_revision)
-
- # Otherwise, either:
- # - |merge_base| is |sha| (we have a guaranteed llvm-svn number on |sha|)
- # - |merge_base| is neither (we have a guaranteed llvm-svn number on
- # |merge_base|, but not |sha|)
- merge_base_number = translate_prebase_sha_to_rev_number(
- llvm_config, merge_base)
- if merge_base == sha:
- return Rev(branch=MAIN_BRANCH, number=merge_base_number)
-
- distance_from_base = check_output(
- [
- 'git',
- 'rev-list',
- '--count',
- '--first-parent',
- f'{merge_base}..{sha}',
- ],
- cwd=llvm_config.dir,
- )
-
- revision_number = merge_base_number + int(distance_from_base.strip())
- branches_containing = check_output(
- ['git', 'branch', '-r', '--contains', sha],
- cwd=llvm_config.dir,
- )
-
- candidates = []
-
- prefix = llvm_config.remote + '/'
- for branch in branches_containing.splitlines():
- branch = branch.strip()
- if branch.startswith(prefix):
- candidates.append(branch[len(prefix):])
-
- if not candidates:
- raise ValueError(
- f'No viable branches found from {llvm_config.remote} with {sha}')
-
- # It seems that some `origin/release/.*` branches have
- # `origin/upstream/release/.*` equivalents, which is... awkward to deal with.
- # Prefer the latter, since that seems to have newer commits than the former.
- # Technically n^2, but len(elements) should be like, tens in the worst case.
- candidates = [x for x in candidates if f'upstream/{x}' not in candidates]
- if len(candidates) != 1:
- raise ValueError(
- f'Ambiguity: multiple branches from {llvm_config.remote} have {sha}: '
- f'{sorted(candidates)}')
-
- return Rev(branch=candidates[0], number=revision_number)
-
-
-def parse_git_commit_messages(stream: t.Iterable[str],
- separator: str) -> t.Iterable[t.Tuple[str, str]]:
- """Parses a stream of git log messages.
-
- These are expected to be in the format:
-
- 40 character sha
- commit
- message
- body
- separator
- 40 character sha
- commit
- message
- body
- separator
- """
-
- lines = iter(stream)
- while True:
- # Looks like a potential bug in pylint? crbug.com/1041148
- # pylint: disable=stop-iteration-return
- sha = next(lines, None)
- if sha is None:
- return
-
- sha = sha.strip()
- assert is_git_sha(sha), f'Invalid git SHA: {sha}'
-
- message = []
- for line in lines:
- if line.strip() == separator:
- break
- message.append(line)
-
- yield sha, ''.join(message)
+
+ revision_number = merge_base_number + int(distance_from_base.strip())
+ branches_containing = check_output(
+ ["git", "branch", "-r", "--contains", sha],
+ cwd=llvm_config.dir,
+ )
+
+ candidates = []
+
+ prefix = llvm_config.remote + "/"
+ for branch in branches_containing.splitlines():
+ branch = branch.strip()
+ if branch.startswith(prefix):
+ candidates.append(branch[len(prefix) :])
+
+ if not candidates:
+ raise ValueError(
+ f"No viable branches found from {llvm_config.remote} with {sha}"
+ )
+
+ # It seems that some `origin/release/.*` branches have
+ # `origin/upstream/release/.*` equivalents, which is... awkward to deal with.
+ # Prefer the latter, since that seems to have newer commits than the former.
+ # Technically n^2, but len(elements) should be like, tens in the worst case.
+ candidates = [x for x in candidates if f"upstream/{x}" not in candidates]
+ if len(candidates) != 1:
+ raise ValueError(
+ f"Ambiguity: multiple branches from {llvm_config.remote} have {sha}: "
+ f"{sorted(candidates)}"
+ )
+
+ return Rev(branch=candidates[0], number=revision_number)
+
+
+def parse_git_commit_messages(
+ stream: t.Iterable[str], separator: str
+) -> t.Iterable[t.Tuple[str, str]]:
+ """Parses a stream of git log messages.
+
+ These are expected to be in the format:
+
+ 40 character sha
+ commit
+ message
+ body
+ separator
+ 40 character sha
+ commit
+ message
+ body
+ separator
+ """
+
+ lines = iter(stream)
+ while True:
+ # Looks like a potential bug in pylint? crbug.com/1041148
+ # pylint: disable=stop-iteration-return
+ sha = next(lines, None)
+ if sha is None:
+ return
+
+ sha = sha.strip()
+ assert is_git_sha(sha), f"Invalid git SHA: {sha}"
+
+ message = []
+ for line in lines:
+ if line.strip() == separator:
+ break
+ message.append(line)
+
+ yield sha, "".join(message)
def translate_prebase_rev_to_sha(llvm_config: LLVMConfig, rev: Rev) -> str:
- """Translates a Rev to a SHA.
-
- This function assumes that the given rev refers to a commit that's an
- ancestor of |base_llvm_sha|.
- """
- # Because reverts may include reverted commit messages, we can't just |-n1|
- # and pick that.
- separator = '>!' * 80
- looking_for = f'llvm-svn: {rev.number}'
-
- git_command = [
- 'git', 'log', '--grep', f'^{looking_for}$', f'--format=%H%n%B{separator}',
- base_llvm_sha
- ]
-
- subp = subprocess.Popen(
- git_command,
- cwd=llvm_config.dir,
- stdin=subprocess.DEVNULL,
- stdout=subprocess.PIPE,
- encoding='utf-8',
- )
-
- with subp:
- for sha, message in parse_git_commit_messages(subp.stdout, separator):
- last_line = message.splitlines()[-1]
- if last_line.strip() == looking_for:
- subp.terminate()
- return sha
-
- if subp.returncode:
- raise subprocess.CalledProcessError(subp.returncode, git_command)
- raise ValueError(f'No commit with revision {rev} found')
+ """Translates a Rev to a SHA.
+
+ This function assumes that the given rev refers to a commit that's an
+ ancestor of |base_llvm_sha|.
+ """
+ # Because reverts may include reverted commit messages, we can't just |-n1|
+ # and pick that.
+ separator = ">!" * 80
+ looking_for = f"llvm-svn: {rev.number}"
+
+ git_command = [
+ "git",
+ "log",
+ "--grep",
+ f"^{looking_for}$",
+ f"--format=%H%n%B{separator}",
+ base_llvm_sha,
+ ]
+
+ subp = subprocess.Popen(
+ git_command,
+ cwd=llvm_config.dir,
+ stdin=subprocess.DEVNULL,
+ stdout=subprocess.PIPE,
+ encoding="utf-8",
+ )
+
+ with subp:
+ for sha, message in parse_git_commit_messages(subp.stdout, separator):
+ last_line = message.splitlines()[-1]
+ if last_line.strip() == looking_for:
+ subp.terminate()
+ return sha
+
+ if subp.returncode:
+ raise subprocess.CalledProcessError(subp.returncode, git_command)
+ raise ValueError(f"No commit with revision {rev} found")
def translate_rev_to_sha(llvm_config: LLVMConfig, rev: Rev) -> str:
- """Translates a Rev to a SHA.
-
- Raises a ValueError if the given Rev doesn't exist in the given config.
- """
- branch, number = rev
-
- if branch == MAIN_BRANCH:
- if number < base_llvm_revision:
- return translate_prebase_rev_to_sha(llvm_config, rev)
- base_sha = base_llvm_sha
- base_revision_number = base_llvm_revision
- else:
- base_sha = check_output(
- ['git', 'merge-base', base_llvm_sha, f'{llvm_config.remote}/{branch}'],
+ """Translates a Rev to a SHA.
+
+ Raises a ValueError if the given Rev doesn't exist in the given config.
+ """
+ branch, number = rev
+
+ if branch == MAIN_BRANCH:
+ if number < base_llvm_revision:
+ return translate_prebase_rev_to_sha(llvm_config, rev)
+ base_sha = base_llvm_sha
+ base_revision_number = base_llvm_revision
+ else:
+ base_sha = check_output(
+ [
+ "git",
+ "merge-base",
+ base_llvm_sha,
+ f"{llvm_config.remote}/{branch}",
+ ],
+ cwd=llvm_config.dir,
+ )
+ base_sha = base_sha.strip()
+ if base_sha == base_llvm_sha:
+ base_revision_number = base_llvm_revision
+ else:
+ base_revision_number = translate_prebase_sha_to_rev_number(
+ llvm_config, base_sha
+ )
+
+ # Alternatively, we could |git log --format=%H|, but git is *super* fast
+ # about rev walking/counting locally compared to long |log|s, so we walk back
+ # twice.
+ head = check_output(
+ ["git", "rev-parse", f"{llvm_config.remote}/{branch}"],
+ cwd=llvm_config.dir,
+ )
+ branch_head_sha = head.strip()
+
+ commit_number = number - base_revision_number
+ revs_between_str = check_output(
+ [
+ "git",
+ "rev-list",
+ "--count",
+ "--first-parent",
+ f"{base_sha}..{branch_head_sha}",
+ ],
+ cwd=llvm_config.dir,
+ )
+ revs_between = int(revs_between_str.strip())
+
+ commits_behind_head = revs_between - commit_number
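+    # A worked example with illustrative numbers: number=375507 and
+    # base_revision_number=375505 give commit_number=2; if the branch head is
+    # 10 first-parent commits past base_sha, then commits_behind_head == 8,
+    # and we resolve `<branch_head_sha>~8` below.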
+ if commits_behind_head < 0:
+ raise ValueError(
+ f"Revision {rev} is past {llvm_config.remote}/{branch}. Try updating "
+ "your tree?"
+ )
+
+ result = check_output(
+ ["git", "rev-parse", f"{branch_head_sha}~{commits_behind_head}"],
cwd=llvm_config.dir,
)
- base_sha = base_sha.strip()
- if base_sha == base_llvm_sha:
- base_revision_number = base_llvm_revision
- else:
- base_revision_number = translate_prebase_sha_to_rev_number(
- llvm_config, base_sha)
-
- # Alternatively, we could |git log --format=%H|, but git is *super* fast
- # about rev walking/counting locally compared to long |log|s, so we walk back
- # twice.
- head = check_output(
- ['git', 'rev-parse', f'{llvm_config.remote}/{branch}'],
- cwd=llvm_config.dir,
- )
- branch_head_sha = head.strip()
-
- commit_number = number - base_revision_number
- revs_between_str = check_output(
- [
- 'git',
- 'rev-list',
- '--count',
- '--first-parent',
- f'{base_sha}..{branch_head_sha}',
- ],
- cwd=llvm_config.dir,
- )
- revs_between = int(revs_between_str.strip())
-
- commits_behind_head = revs_between - commit_number
- if commits_behind_head < 0:
- raise ValueError(
- f'Revision {rev} is past {llvm_config.remote}/{branch}. Try updating '
- 'your tree?')
-
- result = check_output(
- ['git', 'rev-parse', f'{branch_head_sha}~{commits_behind_head}'],
- cwd=llvm_config.dir,
- )
-
- return result.strip()
-
-
-def find_root_llvm_dir(root_dir: str = '.') -> str:
- """Finds the root of an LLVM directory starting at |root_dir|.
-
- Raises a subprocess.CalledProcessError if no git directory is found.
- """
- result = check_output(
- ['git', 'rev-parse', '--show-toplevel'],
- cwd=root_dir,
- )
- return result.strip()
+
+ return result.strip()
+
+
+def find_root_llvm_dir(root_dir: str = ".") -> str:
+ """Finds the root of an LLVM directory starting at |root_dir|.
+
+ Raises a subprocess.CalledProcessError if no git directory is found.
+ """
+ result = check_output(
+ ["git", "rev-parse", "--show-toplevel"],
+ cwd=root_dir,
+ )
+ return result.strip()
def main(argv: t.List[str]) -> None:
- parser = argparse.ArgumentParser(description=__doc__)
- parser.add_argument(
- '--llvm_dir',
- help='LLVM directory to consult for git history, etc. Autodetected '
- 'if cwd is inside of an LLVM tree')
- parser.add_argument(
- '--upstream',
- default='origin',
- help="LLVM upstream's remote name. Defaults to %(default)s.")
- sha_or_rev = parser.add_mutually_exclusive_group(required=True)
- sha_or_rev.add_argument(
- '--sha', help='A git SHA (or ref) to convert to a rev')
- sha_or_rev.add_argument('--rev', help='A rev to convert into a sha')
- opts = parser.parse_args(argv)
-
- llvm_dir = opts.llvm_dir
- if llvm_dir is None:
- try:
- llvm_dir = find_root_llvm_dir()
- except subprocess.CalledProcessError:
- parser.error("Couldn't autodetect an LLVM tree; please use --llvm_dir")
-
- config = LLVMConfig(
- remote=opts.upstream,
- dir=opts.llvm_dir or find_root_llvm_dir(),
- )
-
- if opts.sha:
- rev = translate_sha_to_rev(config, opts.sha)
- print(rev)
- else:
- sha = translate_rev_to_sha(config, Rev.parse(opts.rev))
- print(sha)
-
-
-if __name__ == '__main__':
- main(sys.argv[1:])
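+    # Typical invocations, as an illustrative sketch:
+    #   git_llvm_rev.py --llvm_dir ~/llvm-project --sha <some sha>  # prints a rev
+    #   git_llvm_rev.py --llvm_dir ~/llvm-project --rev r375505     # prints a sha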
+ parser = argparse.ArgumentParser(description=__doc__)
+ parser.add_argument(
+ "--llvm_dir",
+ help="LLVM directory to consult for git history, etc. Autodetected "
+ "if cwd is inside of an LLVM tree",
+ )
+ parser.add_argument(
+ "--upstream",
+ default="origin",
+ help="LLVM upstream's remote name. Defaults to %(default)s.",
+ )
+ sha_or_rev = parser.add_mutually_exclusive_group(required=True)
+ sha_or_rev.add_argument(
+ "--sha", help="A git SHA (or ref) to convert to a rev"
+ )
+ sha_or_rev.add_argument("--rev", help="A rev to convert into a sha")
+ opts = parser.parse_args(argv)
+
+ llvm_dir = opts.llvm_dir
+ if llvm_dir is None:
+ try:
+ llvm_dir = find_root_llvm_dir()
+ except subprocess.CalledProcessError:
+ parser.error(
+ "Couldn't autodetect an LLVM tree; please use --llvm_dir"
+ )
+
+ config = LLVMConfig(
+ remote=opts.upstream,
+ dir=opts.llvm_dir or find_root_llvm_dir(),
+ )
+
+ if opts.sha:
+ rev = translate_sha_to_rev(config, opts.sha)
+ print(rev)
+ else:
+ sha = translate_rev_to_sha(config, Rev.parse(opts.rev))
+ print(sha)
+
+
+if __name__ == "__main__":
+ main(sys.argv[1:])
diff --git a/llvm_tools/git_llvm_rev_test.py b/llvm_tools/git_llvm_rev_test.py
index d05093a8..86a4b202 100755
--- a/llvm_tools/git_llvm_rev_test.py
+++ b/llvm_tools/git_llvm_rev_test.py
@@ -1,6 +1,6 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
-# Copyright 2019 The Chromium OS Authors. All rights reserved.
+# Copyright 2019 The ChromiumOS Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
@@ -9,122 +9,143 @@
import unittest
import git_llvm_rev
-import llvm_project
from git_llvm_rev import MAIN_BRANCH
+import llvm_project
def get_llvm_config() -> git_llvm_rev.LLVMConfig:
- return git_llvm_rev.LLVMConfig(
- dir=llvm_project.get_location(), remote='origin')
+ return git_llvm_rev.LLVMConfig(
+ dir=llvm_project.get_location(), remote="origin"
+ )
class Test(unittest.TestCase):
- """Test cases for git_llvm_rev."""
-
- def rev_to_sha_with_round_trip(self, rev: git_llvm_rev.Rev) -> str:
- config = get_llvm_config()
- sha = git_llvm_rev.translate_rev_to_sha(config, rev)
- roundtrip_rev = git_llvm_rev.translate_sha_to_rev(config, sha)
- self.assertEqual(roundtrip_rev, rev)
- return sha
-
- def test_sha_to_rev_on_base_sha_works(self) -> None:
- sha = self.rev_to_sha_with_round_trip(
- git_llvm_rev.Rev(
- branch=MAIN_BRANCH, number=git_llvm_rev.base_llvm_revision))
- self.assertEqual(sha, git_llvm_rev.base_llvm_sha)
-
- def test_sha_to_rev_prior_to_base_rev_works(self) -> None:
- sha = self.rev_to_sha_with_round_trip(
- git_llvm_rev.Rev(branch=MAIN_BRANCH, number=375000))
- self.assertEqual(sha, '2f6da767f13b8fd81f840c211d405fea32ac9db7')
-
- def test_sha_to_rev_after_base_rev_works(self) -> None:
- sha = self.rev_to_sha_with_round_trip(
- git_llvm_rev.Rev(branch=MAIN_BRANCH, number=375506))
- self.assertEqual(sha, '3bf7fddeb05655d9baed4cc69e13535c677ed1dd')
-
- def test_llvm_svn_parsing_runs_ignore_reverts(self) -> None:
- # This commit has a revert that mentions the reverted llvm-svn in the
- # commit message.
-
- # Commit which performed the revert
- sha = self.rev_to_sha_with_round_trip(
- git_llvm_rev.Rev(branch=MAIN_BRANCH, number=374895))
- self.assertEqual(sha, '1731fc88d1fa1fa55edd056db73a339b415dd5d6')
-
- # Commit that was reverted
- sha = self.rev_to_sha_with_round_trip(
- git_llvm_rev.Rev(branch=MAIN_BRANCH, number=374841))
- self.assertEqual(sha, '2a1386c81de504b5bda44fbecf3f7b4cdfd748fc')
-
- def test_imaginary_revs_raise(self) -> None:
- with self.assertRaises(ValueError) as r:
- git_llvm_rev.translate_rev_to_sha(
- get_llvm_config(),
- git_llvm_rev.Rev(branch=MAIN_BRANCH, number=9999999))
-
- self.assertIn('Try updating your tree?', str(r.exception))
-
- def test_merge_commits_count_as_one_commit_crbug1041079(self) -> None:
- # This CL merged _a lot_ of commits in. Verify a few hand-computed
- # properties about it.
- merge_sha_rev_number = 4496 + git_llvm_rev.base_llvm_revision
- sha = self.rev_to_sha_with_round_trip(
- git_llvm_rev.Rev(branch=MAIN_BRANCH, number=merge_sha_rev_number))
- self.assertEqual(sha, '0f0d0ed1c78f1a80139a1f2133fad5284691a121')
-
- sha = self.rev_to_sha_with_round_trip(
- git_llvm_rev.Rev(branch=MAIN_BRANCH, number=merge_sha_rev_number - 1))
- self.assertEqual(sha, '6f635f90929da9545dd696071a829a1a42f84b30')
-
- sha = self.rev_to_sha_with_round_trip(
- git_llvm_rev.Rev(branch=MAIN_BRANCH, number=merge_sha_rev_number + 1))
- self.assertEqual(sha, '199700a5cfeedf227619f966aa3125cef18bc958')
-
- # NOTE: The below tests have _zz_ in their name as an optimization. Iterating
- # on a quick test is painful when these larger tests come before it and take
- # 7secs to run. Python's unittest module guarantees tests are run in
- # alphabetical order by their method name, so...
- #
- # If you're wondering, the slow part is `git branch -r --contains`. I imagine
- # it's going to be very cold code, so I'm not inclined to optimize it much.
-
- def test_zz_branch_revs_work_after_merge_points_and_svn_cutoff(self) -> None:
- # Arbitrary 9.x commit without an attached llvm-svn: value.
- sha = self.rev_to_sha_with_round_trip(
- git_llvm_rev.Rev(branch='upstream/release/9.x', number=366670))
- self.assertEqual(sha, '4e858e4ac00b59f064da4e1f7e276916e7d296aa')
-
- def test_zz_branch_revs_work_at_merge_points(self) -> None:
- rev_number = 366426
- backing_sha = 'c89a3d78f43d81b9cff7b9248772ddf14d21b749'
-
- sha = self.rev_to_sha_with_round_trip(
- git_llvm_rev.Rev(branch=MAIN_BRANCH, number=rev_number))
- self.assertEqual(sha, backing_sha)
-
- # Note that this won't round-trip: since this commit is on the main
- # branch, we'll pick main for this. That's fine.
- sha = git_llvm_rev.translate_rev_to_sha(
- get_llvm_config(),
- git_llvm_rev.Rev(branch='upstream/release/9.x', number=rev_number))
- self.assertEqual(sha, backing_sha)
-
- def test_zz_branch_revs_work_after_merge_points(self) -> None:
- # Picking the commit on the 9.x branch after the merge-base for that +
- # main. Note that this is where llvm-svn numbers should diverge from
- # ours, and are therefore untrustworthy. The commit for this *does* have a
- # different `llvm-svn:` string than we should have.
- sha = self.rev_to_sha_with_round_trip(
- git_llvm_rev.Rev(branch='upstream/release/9.x', number=366427))
- self.assertEqual(sha, '2cf681a11aea459b50d712abc7136f7129e4d57f')
+ """Test cases for git_llvm_rev."""
+
+ def rev_to_sha_with_round_trip(self, rev: git_llvm_rev.Rev) -> str:
+ config = get_llvm_config()
+ sha = git_llvm_rev.translate_rev_to_sha(config, rev)
+ roundtrip_rev = git_llvm_rev.translate_sha_to_rev(config, sha)
+ self.assertEqual(roundtrip_rev, rev)
+ return sha
+
+ def test_sha_to_rev_on_base_sha_works(self) -> None:
+ sha = self.rev_to_sha_with_round_trip(
+ git_llvm_rev.Rev(
+ branch=MAIN_BRANCH, number=git_llvm_rev.base_llvm_revision
+ )
+ )
+ self.assertEqual(sha, git_llvm_rev.base_llvm_sha)
+
+ def test_sha_to_rev_prior_to_base_rev_works(self) -> None:
+ sha = self.rev_to_sha_with_round_trip(
+ git_llvm_rev.Rev(branch=MAIN_BRANCH, number=375000)
+ )
+ self.assertEqual(sha, "2f6da767f13b8fd81f840c211d405fea32ac9db7")
+
+ def test_sha_to_rev_after_base_rev_works(self) -> None:
+ sha = self.rev_to_sha_with_round_trip(
+ git_llvm_rev.Rev(branch=MAIN_BRANCH, number=375506)
+ )
+ self.assertEqual(sha, "3bf7fddeb05655d9baed4cc69e13535c677ed1dd")
+
+ def test_llvm_svn_parsing_runs_ignore_reverts(self) -> None:
+ # This commit has a revert that mentions the reverted llvm-svn in the
+ # commit message.
+
+ # Commit which performed the revert
+ sha = self.rev_to_sha_with_round_trip(
+ git_llvm_rev.Rev(branch=MAIN_BRANCH, number=374895)
+ )
+ self.assertEqual(sha, "1731fc88d1fa1fa55edd056db73a339b415dd5d6")
+
+ # Commit that was reverted
+ sha = self.rev_to_sha_with_round_trip(
+ git_llvm_rev.Rev(branch=MAIN_BRANCH, number=374841)
+ )
+ self.assertEqual(sha, "2a1386c81de504b5bda44fbecf3f7b4cdfd748fc")
+
+ def test_imaginary_revs_raise(self) -> None:
+ with self.assertRaises(ValueError) as r:
+ git_llvm_rev.translate_rev_to_sha(
+ get_llvm_config(),
+ git_llvm_rev.Rev(branch=MAIN_BRANCH, number=9999999),
+ )
+
+ self.assertIn("Try updating your tree?", str(r.exception))
+
+ def test_merge_commits_count_as_one_commit_crbug1041079(self) -> None:
+ # This CL merged _a lot_ of commits in. Verify a few hand-computed
+ # properties about it.
+ merge_sha_rev_number = 4496 + git_llvm_rev.base_llvm_revision
+ sha = self.rev_to_sha_with_round_trip(
+ git_llvm_rev.Rev(branch=MAIN_BRANCH, number=merge_sha_rev_number)
+ )
+ self.assertEqual(sha, "0f0d0ed1c78f1a80139a1f2133fad5284691a121")
+
+ sha = self.rev_to_sha_with_round_trip(
+ git_llvm_rev.Rev(
+ branch=MAIN_BRANCH, number=merge_sha_rev_number - 1
+ )
+ )
+ self.assertEqual(sha, "6f635f90929da9545dd696071a829a1a42f84b30")
+
+ sha = self.rev_to_sha_with_round_trip(
+ git_llvm_rev.Rev(
+ branch=MAIN_BRANCH, number=merge_sha_rev_number + 1
+ )
+ )
+ self.assertEqual(sha, "199700a5cfeedf227619f966aa3125cef18bc958")
+
+    # NOTE: The tests below have _zz_ in their names as an optimization.
+    # Iterating on a quick test is painful when these larger tests come
+    # before it and take about 7 seconds to run. Python's unittest module
+    # runs tests in alphabetical order by method name, so the _zz_ prefix
+    # pushes these slow tests to the end (a short sketch after this file's
+    # diff illustrates the ordering).
+    #
+    # If you're wondering, the slow part is `git branch -r --contains`. I
+    # imagine it's going to be very cold code, so I'm not inclined to
+    # optimize it much.
+
+ def test_zz_branch_revs_work_after_merge_points_and_svn_cutoff(
+ self,
+ ) -> None:
+ # Arbitrary 9.x commit without an attached llvm-svn: value.
+ sha = self.rev_to_sha_with_round_trip(
+ git_llvm_rev.Rev(branch="upstream/release/9.x", number=366670)
+ )
+ self.assertEqual(sha, "4e858e4ac00b59f064da4e1f7e276916e7d296aa")
+
+ def test_zz_branch_revs_work_at_merge_points(self) -> None:
+ rev_number = 366426
+ backing_sha = "c89a3d78f43d81b9cff7b9248772ddf14d21b749"
+
+ sha = self.rev_to_sha_with_round_trip(
+ git_llvm_rev.Rev(branch=MAIN_BRANCH, number=rev_number)
+ )
+ self.assertEqual(sha, backing_sha)
+
+ # Note that this won't round-trip: since this commit is on the main
+ # branch, we'll pick main for this. That's fine.
+ sha = git_llvm_rev.translate_rev_to_sha(
+ get_llvm_config(),
+ git_llvm_rev.Rev(branch="upstream/release/9.x", number=rev_number),
+ )
+ self.assertEqual(sha, backing_sha)
+
+ def test_zz_branch_revs_work_after_merge_points(self) -> None:
+ # Picking the commit on the 9.x branch after the merge-base for that +
+ # main. Note that this is where llvm-svn numbers should diverge from
+ # ours, and are therefore untrustworthy. The commit for this *does* have a
+ # different `llvm-svn:` string than we should have.
+ sha = self.rev_to_sha_with_round_trip(
+ git_llvm_rev.Rev(branch="upstream/release/9.x", number=366427)
+ )
+ self.assertEqual(sha, "2cf681a11aea459b50d712abc7136f7129e4d57f")
# FIXME: When release/10.x happens, it may be nice to have a test-case
# generally covering that, since it's the first branch that we have to travel
# back to the base commit for.
-if __name__ == '__main__':
- llvm_project.ensure_up_to_date()
- unittest.main()
+if __name__ == "__main__":
+ llvm_project.ensure_up_to_date()
+ unittest.main()
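A minimal, standalone sketch (not part of this change) of the ordering trick
the `_zz_` NOTE above relies on; `OrderDemo` is a hypothetical class:

```
import unittest


class OrderDemo(unittest.TestCase):
    def test_a_quick(self):  # Sorts, and therefore runs, first.
        self.assertTrue(True)

    def test_zz_slow(self):  # Sorts last, so quick failures surface early.
        self.assertTrue(True)


if __name__ == "__main__":
    # TestLoader.sortTestMethodsUsing defaults to plain string comparison,
    # which is why a `_zz_` prefix pushes a method to the end of the run.
    unittest.main(verbosity=2)
```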
diff --git a/llvm_tools/git_unittest.py b/llvm_tools/git_unittest.py
index 47927716..ce21e6c9 100755
--- a/llvm_tools/git_unittest.py
+++ b/llvm_tools/git_unittest.py
@@ -1,12 +1,11 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
-# Copyright 2020 The Chromium OS Authors. All rights reserved.
+# Copyright 2020 The ChromiumOS Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Unit tests for git helper functions."""
-from __future__ import print_function
import os
import subprocess
@@ -16,130 +15,148 @@ import unittest.mock as mock
import git
+
# These are unittests; protected access is OK to a point.
# pylint: disable=protected-access
class HelperFunctionsTest(unittest.TestCase):
- """Test class for updating LLVM hashes of packages."""
-
- @mock.patch.object(os.path, 'isdir', return_value=False)
- def testFailedToCreateBranchForInvalidDirectoryPath(self, mock_isdir):
- path_to_repo = '/invalid/path/to/repo'
- branch = 'branch-name'
-
- # Verify the exception is raised when provided an invalid directory path.
- with self.assertRaises(ValueError) as err:
- git.CreateBranch(path_to_repo, branch)
-
- self.assertEqual(
- str(err.exception),
- 'Invalid directory path provided: %s' % path_to_repo)
-
- mock_isdir.assert_called_once()
-
- @mock.patch.object(os.path, 'isdir', return_value=True)
- @mock.patch.object(subprocess, 'check_output', return_value=None)
- def testSuccessfullyCreatedBranch(self, mock_command_output, mock_isdir):
- path_to_repo = '/path/to/repo'
- branch = 'branch-name'
-
- git.CreateBranch(path_to_repo, branch)
-
- mock_isdir.assert_called_once_with(path_to_repo)
-
- self.assertEqual(mock_command_output.call_count, 2)
-
- @mock.patch.object(os.path, 'isdir', return_value=False)
- def testFailedToDeleteBranchForInvalidDirectoryPath(self, mock_isdir):
- path_to_repo = '/invalid/path/to/repo'
- branch = 'branch-name'
-
- # Verify the exception is raised on an invalid repo path.
- with self.assertRaises(ValueError) as err:
- git.DeleteBranch(path_to_repo, branch)
-
- self.assertEqual(
- str(err.exception),
- 'Invalid directory path provided: %s' % path_to_repo)
-
- mock_isdir.assert_called_once()
-
- @mock.patch.object(os.path, 'isdir', return_value=True)
- @mock.patch.object(subprocess, 'check_output', return_value=None)
- def testSuccessfullyDeletedBranch(self, mock_command_output, mock_isdir):
- path_to_repo = '/valid/path/to/repo'
- branch = 'branch-name'
-
- git.DeleteBranch(path_to_repo, branch)
-
- mock_isdir.assert_called_once_with(path_to_repo)
-
- self.assertEqual(mock_command_output.call_count, 3)
-
- @mock.patch.object(os.path, 'isdir', return_value=False)
- def testFailedToUploadChangesForInvalidDirectoryPath(self, mock_isdir):
- path_to_repo = '/some/path/to/repo'
- branch = 'update-LLVM_NEXT_HASH-a123testhash3'
- commit_messages = ['Test message']
-
- # Verify exception is raised when on an invalid repo path.
- with self.assertRaises(ValueError) as err:
- git.UploadChanges(path_to_repo, branch, commit_messages)
-
- self.assertEqual(
- str(err.exception), 'Invalid path provided: %s' % path_to_repo)
-
- mock_isdir.assert_called_once()
-
- @mock.patch.object(os.path, 'isdir', return_value=True)
- @mock.patch.object(subprocess, 'check_output')
- @mock.patch.object(tempfile, 'NamedTemporaryFile')
- def testSuccessfullyUploadedChangesForReview(self, mock_tempfile,
- mock_commands, mock_isdir):
-
- path_to_repo = '/some/path/to/repo'
- branch = 'branch-name'
- commit_messages = ['Test message']
- mock_tempfile.return_value.__enter__.return_value.name = 'tmp'
-
- # A test CL generated by `repo upload`.
- mock_commands.side_effect = [
- None,
- ('remote: https://chromium-review.googlesource.'
- 'com/c/chromiumos/overlays/chromiumos-overlay/'
- '+/193147 Fix stdout')
- ]
- change_list = git.UploadChanges(path_to_repo, branch, commit_messages)
-
- self.assertEqual(change_list.cl_number, 193147)
-
- mock_isdir.assert_called_once_with(path_to_repo)
-
- expected_command = [
- 'git', 'commit', '-F',
- mock_tempfile.return_value.__enter__.return_value.name
- ]
- self.assertEqual(mock_commands.call_args_list[0],
- mock.call(expected_command, cwd=path_to_repo))
-
- expected_cmd = [
- 'repo', 'upload', '--yes', '--ne', '--no-verify',
- '--br=%s' % branch
- ]
- self.assertEqual(
- mock_commands.call_args_list[1],
- mock.call(
- expected_cmd,
- stderr=subprocess.STDOUT,
- cwd=path_to_repo,
- encoding='utf-8'))
-
- self.assertEqual(
- change_list.url,
- 'https://chromium-review.googlesource.com/c/chromiumos/overlays/'
- 'chromiumos-overlay/+/193147')
+ """Test class for updating LLVM hashes of packages."""
+ @mock.patch.object(os.path, "isdir", return_value=False)
+ def testFailedToCreateBranchForInvalidDirectoryPath(self, mock_isdir):
+ path_to_repo = "/invalid/path/to/repo"
+ branch = "branch-name"
-if __name__ == '__main__':
- unittest.main()
+ # Verify the exception is raised when provided an invalid directory path.
+ with self.assertRaises(ValueError) as err:
+ git.CreateBranch(path_to_repo, branch)
+
+ self.assertEqual(
+ str(err.exception),
+ "Invalid directory path provided: %s" % path_to_repo,
+ )
+
+ mock_isdir.assert_called_once()
+
+ @mock.patch.object(os.path, "isdir", return_value=True)
+ @mock.patch.object(subprocess, "check_output", return_value=None)
+ def testSuccessfullyCreatedBranch(self, mock_command_output, mock_isdir):
+ path_to_repo = "/path/to/repo"
+ branch = "branch-name"
+
+ git.CreateBranch(path_to_repo, branch)
+
+ mock_isdir.assert_called_once_with(path_to_repo)
+
+ self.assertEqual(mock_command_output.call_count, 2)
+
+ @mock.patch.object(os.path, "isdir", return_value=False)
+ def testFailedToDeleteBranchForInvalidDirectoryPath(self, mock_isdir):
+ path_to_repo = "/invalid/path/to/repo"
+ branch = "branch-name"
+
+ # Verify the exception is raised on an invalid repo path.
+ with self.assertRaises(ValueError) as err:
+ git.DeleteBranch(path_to_repo, branch)
+
+ self.assertEqual(
+ str(err.exception),
+ "Invalid directory path provided: %s" % path_to_repo,
+ )
+
+ mock_isdir.assert_called_once()
+
+ @mock.patch.object(os.path, "isdir", return_value=True)
+ @mock.patch.object(subprocess, "check_output", return_value=None)
+ def testSuccessfullyDeletedBranch(self, mock_command_output, mock_isdir):
+ path_to_repo = "/valid/path/to/repo"
+ branch = "branch-name"
+
+ git.DeleteBranch(path_to_repo, branch)
+
+ mock_isdir.assert_called_once_with(path_to_repo)
+
+ self.assertEqual(mock_command_output.call_count, 3)
+
+ @mock.patch.object(os.path, "isdir", return_value=False)
+ def testFailedToUploadChangesForInvalidDirectoryPath(self, mock_isdir):
+ path_to_repo = "/some/path/to/repo"
+ branch = "update-LLVM_NEXT_HASH-a123testhash3"
+ commit_messages = ["Test message"]
+
+ # Verify exception is raised when on an invalid repo path.
+ with self.assertRaises(ValueError) as err:
+ git.UploadChanges(path_to_repo, branch, commit_messages)
+
+ self.assertEqual(
+ str(err.exception), "Invalid path provided: %s" % path_to_repo
+ )
+
+ mock_isdir.assert_called_once()
+
+ @mock.patch.object(os.path, "isdir", return_value=True)
+ @mock.patch.object(subprocess, "check_output")
+ @mock.patch.object(tempfile, "NamedTemporaryFile")
+ def testSuccessfullyUploadedChangesForReview(
+ self, mock_tempfile, mock_commands, mock_isdir
+ ):
+
+ path_to_repo = "/some/path/to/repo"
+ branch = "branch-name"
+ commit_messages = ["Test message"]
+ mock_tempfile.return_value.__enter__.return_value.name = "tmp"
+
+ # A test CL generated by `repo upload`.
+ mock_commands.side_effect = [
+ None,
+ (
+ "remote: https://chromium-review.googlesource."
+ "com/c/chromiumos/overlays/chromiumos-overlay/"
+ "+/193147 Fix stdout"
+ ),
+ ]
+ change_list = git.UploadChanges(path_to_repo, branch, commit_messages)
+
+ self.assertEqual(change_list.cl_number, 193147)
+
+ mock_isdir.assert_called_once_with(path_to_repo)
+
+ expected_command = [
+ "git",
+ "commit",
+ "-F",
+ mock_tempfile.return_value.__enter__.return_value.name,
+ ]
+ self.assertEqual(
+ mock_commands.call_args_list[0],
+ mock.call(expected_command, cwd=path_to_repo),
+ )
+
+ expected_cmd = [
+ "repo",
+ "upload",
+ "--yes",
+ "--ne",
+ "--no-verify",
+ "--br=%s" % branch,
+ ]
+ self.assertEqual(
+ mock_commands.call_args_list[1],
+ mock.call(
+ expected_cmd,
+ stderr=subprocess.STDOUT,
+ cwd=path_to_repo,
+ encoding="utf-8",
+ ),
+ )
+
+ self.assertEqual(
+ change_list.url,
+ "https://chromium-review.googlesource.com/c/chromiumos/overlays/"
+ "chromiumos-overlay/+/193147",
+ )
+
+
+if __name__ == "__main__":
+ unittest.main()
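As a rough illustration of what the mocked `repo upload` output above encodes,
here is a hedged sketch of how the CL number and URL could be extracted from
it; `parse_upload_output` and `ChangeList` are stand-in names, not the actual
implementation in git.py:

```
import re
from typing import NamedTuple, Optional


class ChangeList(NamedTuple):
    url: str
    cl_number: Optional[int]


def parse_upload_output(output: str) -> ChangeList:
    # `repo upload` prints a line such as:
    #   remote: https://chromium-review.googlesource.com/c/.../+/193147 ...
    match = re.search(r"(https://\S+/\+/(\d+))", output)
    if not match:
        return ChangeList(url="", cl_number=None)
    return ChangeList(url=match.group(1), cl_number=int(match.group(2)))


result = parse_upload_output(
    "remote: https://chromium-review.googlesource.com/c/chromiumos/"
    "overlays/chromiumos-overlay/+/193147 Fix stdout"
)
print(result.cl_number)  # -> 193147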
diff --git a/llvm_tools/llvm_bisection.py b/llvm_tools/llvm_bisection.py
index 0148efd2..0b851ebe 100755
--- a/llvm_tools/llvm_bisection.py
+++ b/llvm_tools/llvm_bisection.py
@@ -1,12 +1,11 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
-# Copyright 2019 The Chromium OS Authors. All rights reserved.
+# Copyright 2019 The ChromiumOS Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Performs bisection on LLVM based off a .JSON file."""
-from __future__ import print_function
import argparse
import enum
@@ -25,352 +24,442 @@ import update_tryjob_status
class BisectionExitStatus(enum.Enum):
- """Exit code when performing bisection."""
+ """Exit code when performing bisection."""
- # Means that there are no more revisions available to bisect.
- BISECTION_COMPLETE = 126
+ # Means that there are no more revisions available to bisect.
+ BISECTION_COMPLETE = 126
def GetCommandLineArgs():
- """Parses the command line for the command line arguments."""
-
- # Default path to the chroot if a path is not specified.
- cros_root = os.path.expanduser('~')
- cros_root = os.path.join(cros_root, 'chromiumos')
-
- # Create parser and add optional command-line arguments.
- parser = argparse.ArgumentParser(
- description='Bisects LLVM via tracking a JSON file.')
-
- # Add argument for other change lists that want to run alongside the tryjob
- # which has a change list of updating a package's git hash.
- parser.add_argument(
- '--parallel',
- type=int,
- default=3,
- help='How many tryjobs to create between the last good version and '
- 'the first bad version (default: %(default)s)')
-
- # Add argument for the good LLVM revision for bisection.
- parser.add_argument('--start_rev',
- required=True,
- type=int,
- help='The good revision for the bisection.')
-
- # Add argument for the bad LLVM revision for bisection.
- parser.add_argument('--end_rev',
- required=True,
- type=int,
- help='The bad revision for the bisection.')
-
- # Add argument for the absolute path to the file that contains information on
- # the previous tested svn version.
- parser.add_argument(
- '--last_tested',
- required=True,
- help='the absolute path to the file that contains the tryjobs')
-
- # Add argument for the absolute path to the LLVM source tree.
- parser.add_argument(
- '--src_path',
- help='the path to the LLVM source tree to use (used for retrieving the '
- 'git hash of each version between the last good version and first bad '
- 'version)')
-
- # Add argument for other change lists that want to run alongside the tryjob
- # which has a change list of updating a package's git hash.
- parser.add_argument(
- '--extra_change_lists',
- type=int,
- nargs='+',
- help='change lists that would like to be run alongside the change list '
- 'of updating the packages')
-
- # Add argument for custom options for the tryjob.
- parser.add_argument('--options',
- required=False,
- nargs='+',
- help='options to use for the tryjob testing')
-
- # Add argument for the builder to use for the tryjob.
- parser.add_argument('--builder',
- required=True,
- help='builder to use for the tryjob testing')
-
- # Add argument for the description of the tryjob.
- parser.add_argument('--description',
- required=False,
- nargs='+',
- help='the description of the tryjob')
-
- # Add argument for a specific chroot path.
- parser.add_argument('--chroot_path',
- default=cros_root,
- help='the path to the chroot (default: %(default)s)')
-
- # Add argument for whether to display command contents to `stdout`.
- parser.add_argument('--verbose',
- action='store_true',
- help='display contents of a command to the terminal '
- '(default: %(default)s)')
-
- # Add argument for whether to display command contents to `stdout`.
- parser.add_argument('--nocleanup',
- action='store_false',
- dest='cleanup',
- help='Abandon CLs created for bisectoin')
-
- args_output = parser.parse_args()
-
- assert args_output.start_rev < args_output.end_rev, (
- 'Start revision %d is >= end revision %d' %
- (args_output.start_rev, args_output.end_rev))
-
- if args_output.last_tested and not args_output.last_tested.endswith('.json'):
- raise ValueError('Filed provided %s does not end in ".json"' %
- args_output.last_tested)
-
- return args_output
+ """Parses the command line for the command line arguments."""
+
+ # Default path to the chroot if a path is not specified.
+ cros_root = os.path.expanduser("~")
+ cros_root = os.path.join(cros_root, "chromiumos")
+
+ # Create parser and add optional command-line arguments.
+ parser = argparse.ArgumentParser(
+ description="Bisects LLVM via tracking a JSON file."
+ )
+
+ # Add argument for other change lists that want to run alongside the tryjob
+ # which has a change list of updating a package's git hash.
+ parser.add_argument(
+ "--parallel",
+ type=int,
+ default=3,
+ help="How many tryjobs to create between the last good version and "
+ "the first bad version (default: %(default)s)",
+ )
+
+ # Add argument for the good LLVM revision for bisection.
+ parser.add_argument(
+ "--start_rev",
+ required=True,
+ type=int,
+ help="The good revision for the bisection.",
+ )
+
+ # Add argument for the bad LLVM revision for bisection.
+ parser.add_argument(
+ "--end_rev",
+ required=True,
+ type=int,
+ help="The bad revision for the bisection.",
+ )
+
+ # Add argument for the absolute path to the file that contains information on
+ # the previous tested svn version.
+ parser.add_argument(
+ "--last_tested",
+ required=True,
+ help="the absolute path to the file that contains the tryjobs",
+ )
+
+ # Add argument for the absolute path to the LLVM source tree.
+ parser.add_argument(
+ "--src_path",
+ help="the path to the LLVM source tree to use (used for retrieving the "
+ "git hash of each version between the last good version and first bad "
+ "version)",
+ )
+
+ # Add argument for other change lists that want to run alongside the tryjob
+ # which has a change list of updating a package's git hash.
+ parser.add_argument(
+ "--extra_change_lists",
+ type=int,
+ nargs="+",
+ help="change lists that would like to be run alongside the change list "
+ "of updating the packages",
+ )
+
+ # Add argument for custom options for the tryjob.
+ parser.add_argument(
+ "--options",
+ required=False,
+ nargs="+",
+ help="options to use for the tryjob testing",
+ )
+
+ # Add argument for the builder to use for the tryjob.
+ parser.add_argument(
+ "--builder", required=True, help="builder to use for the tryjob testing"
+ )
+
+ # Add argument for the description of the tryjob.
+ parser.add_argument(
+ "--description",
+ required=False,
+ nargs="+",
+ help="the description of the tryjob",
+ )
+
+ # Add argument for a specific chroot path.
+ parser.add_argument(
+ "--chroot_path",
+ default=cros_root,
+ help="the path to the chroot (default: %(default)s)",
+ )
+
+ # Add argument for whether to display command contents to `stdout`.
+ parser.add_argument(
+ "--verbose",
+ action="store_true",
+ help="display contents of a command to the terminal "
+ "(default: %(default)s)",
+ )
+
+    # Add argument for keeping (not abandoning) the CLs created for
+    # bisection (see the sketch after this function).
+    parser.add_argument(
+        "--nocleanup",
+        action="store_false",
+        dest="cleanup",
+        help="do not abandon the CLs created for bisection",
+    )
+
+ args_output = parser.parse_args()
+
+ assert (
+ args_output.start_rev < args_output.end_rev
+ ), "Start revision %d is >= end revision %d" % (
+ args_output.start_rev,
+ args_output.end_rev,
+ )
+
+ if args_output.last_tested and not args_output.last_tested.endswith(
+ ".json"
+ ):
+ raise ValueError(
+            'File provided %s does not end in ".json"'
+ % args_output.last_tested
+ )
+
+ return args_output
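The `--nocleanup` flag above uses argparse's store_false/dest pattern: the
option is spelled negatively, but the stored attribute is the positive
`cleanup`, defaulting to True. A minimal sketch:

```
import argparse

parser = argparse.ArgumentParser()
parser.add_argument(
    "--nocleanup",
    action="store_false",
    dest="cleanup",
    help="do not abandon the CLs created for bisection",
)
print(parser.parse_args([]).cleanup)  # -> True (clean up by default)
print(parser.parse_args(["--nocleanup"]).cleanup)  # -> False
```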
def GetRemainingRange(start, end, tryjobs):
- """Gets the start and end intervals in 'json_file'.
-
- Args:
- start: The start version of the bisection provided via the command line.
- end: The end version of the bisection provided via the command line.
- tryjobs: A list of tryjobs where each element is in the following format:
- [
- {[TRYJOB_INFORMATION]},
- {[TRYJOB_INFORMATION]},
- ...,
- {[TRYJOB_INFORMATION]}
- ]
-
- Returns:
- The new start version and end version for bisection, a set of revisions
- that are 'pending' and a set of revisions that are to be skipped.
-
- Raises:
- ValueError: The value for 'status' is missing or there is a mismatch
- between 'start' and 'end' compared to the 'start' and 'end' in the JSON
- file.
- AssertionError: The new start version is >= than the new end version.
- """
-
- if not tryjobs:
- return start, end, {}, {}
-
- # Verify that each tryjob has a value for the 'status' key.
- for cur_tryjob_dict in tryjobs:
- if not cur_tryjob_dict.get('status', None):
- raise ValueError('"status" is missing or has no value, please '
- 'go to %s and update it' % cur_tryjob_dict['link'])
-
- all_bad_revisions = [end]
- all_bad_revisions.extend(
- cur_tryjob['rev'] for cur_tryjob in tryjobs
- if cur_tryjob['status'] == update_tryjob_status.TryjobStatus.BAD.value)
-
- # The minimum value for the 'bad' field in the tryjobs is the new end
- # version.
- bad_rev = min(all_bad_revisions)
-
- all_good_revisions = [start]
- all_good_revisions.extend(
- cur_tryjob['rev'] for cur_tryjob in tryjobs
- if cur_tryjob['status'] == update_tryjob_status.TryjobStatus.GOOD.value)
-
- # The maximum value for the 'good' field in the tryjobs is the new start
- # version.
- good_rev = max(all_good_revisions)
-
- # The good version should always be strictly less than the bad version;
- # otherwise, bisection is broken.
- assert good_rev < bad_rev, ('Bisection is broken because %d (good) is >= '
- '%d (bad)' % (good_rev, bad_rev))
-
- # Find all revisions that are 'pending' within 'good_rev' and 'bad_rev'.
- #
- # NOTE: The intent is to not launch tryjobs between 'good_rev' and 'bad_rev'
- # that have already been launched (this set is used when constructing the
- # list of revisions to launch tryjobs for).
- pending_revisions = {
- tryjob['rev']
- for tryjob in tryjobs
- if tryjob['status'] == update_tryjob_status.TryjobStatus.PENDING.value
- and good_rev < tryjob['rev'] < bad_rev
- }
-
- # Find all revisions that are to be skipped within 'good_rev' and 'bad_rev'.
- #
- # NOTE: The intent is to not launch tryjobs between 'good_rev' and 'bad_rev'
- # that have already been marked as 'skip' (this set is used when constructing
- # the list of revisions to launch tryjobs for).
- skip_revisions = {
- tryjob['rev']
- for tryjob in tryjobs
- if tryjob['status'] == update_tryjob_status.TryjobStatus.SKIP.value
- and good_rev < tryjob['rev'] < bad_rev
- }
-
- return good_rev, bad_rev, pending_revisions, skip_revisions
-
-
-def GetCommitsBetween(start, end, parallel, src_path, pending_revisions,
- skip_revisions):
- """Determines the revisions between start and end."""
-
- with get_llvm_hash.LLVMHash().CreateTempDirectory() as temp_dir:
- # We have guaranteed contiguous revision numbers after this,
- # and that guarnatee simplifies things considerably, so we don't
- # support anything before it.
- assert start >= git_llvm_rev.base_llvm_revision, f'{start} was too long ago'
-
- with get_llvm_hash.CreateTempLLVMRepo(temp_dir) as new_repo:
- if not src_path:
- src_path = new_repo
- index_step = (end - (start + 1)) // (parallel + 1)
- if not index_step:
- index_step = 1
- revisions = [
- rev for rev in range(start + 1, end, index_step)
- if rev not in pending_revisions and rev not in skip_revisions
+ """Gets the start and end intervals in 'json_file'.
+
+ Args:
+ start: The start version of the bisection provided via the command line.
+ end: The end version of the bisection provided via the command line.
+ tryjobs: A list of tryjobs where each element is in the following format:
+ [
+ {[TRYJOB_INFORMATION]},
+ {[TRYJOB_INFORMATION]},
+ ...,
+ {[TRYJOB_INFORMATION]}
]
- git_hashes = [
- get_llvm_hash.GetGitHashFrom(src_path, rev) for rev in revisions
- ]
- return revisions, git_hashes
-
-
-def Bisect(revisions, git_hashes, bisect_state, last_tested, update_packages,
- chroot_path, patch_metadata_file, extra_change_lists, options,
- builder, verbose):
- """Adds tryjobs and updates the status file with the new tryjobs."""
- try:
- for svn_revision, git_hash in zip(revisions, git_hashes):
- tryjob_dict = modify_a_tryjob.AddTryjob(update_packages, git_hash,
- svn_revision, chroot_path,
- patch_metadata_file,
- extra_change_lists, options,
- builder, verbose, svn_revision)
-
- bisect_state['jobs'].append(tryjob_dict)
- finally:
- # Do not want to lose progress if there is an exception.
- if last_tested:
- new_file = '%s.new' % last_tested
- with open(new_file, 'w') as json_file:
- json.dump(bisect_state, json_file, indent=4, separators=(',', ': '))
-
- os.rename(new_file, last_tested)
+ Returns:
+ The new start version and end version for bisection, a set of revisions
+ that are 'pending' and a set of revisions that are to be skipped.
+
+ Raises:
+        ValueError: The value for 'status' is missing, or the provided
+        'start' and 'end' do not match the 'start' and 'end' recorded in
+        the JSON file.
+        AssertionError: The new start version is >= the new end version.
+ """
+
+ if not tryjobs:
+ return start, end, {}, {}
+
+ # Verify that each tryjob has a value for the 'status' key.
+ for cur_tryjob_dict in tryjobs:
+ if not cur_tryjob_dict.get("status", None):
+ raise ValueError(
+ '"status" is missing or has no value, please '
+ "go to %s and update it" % cur_tryjob_dict["link"]
+ )
+
+ all_bad_revisions = [end]
+ all_bad_revisions.extend(
+ cur_tryjob["rev"]
+ for cur_tryjob in tryjobs
+ if cur_tryjob["status"] == update_tryjob_status.TryjobStatus.BAD.value
+ )
+
+ # The minimum value for the 'bad' field in the tryjobs is the new end
+ # version.
+ bad_rev = min(all_bad_revisions)
+
+ all_good_revisions = [start]
+ all_good_revisions.extend(
+ cur_tryjob["rev"]
+ for cur_tryjob in tryjobs
+ if cur_tryjob["status"] == update_tryjob_status.TryjobStatus.GOOD.value
+ )
+
+ # The maximum value for the 'good' field in the tryjobs is the new start
+ # version.
+ good_rev = max(all_good_revisions)
+
+ # The good version should always be strictly less than the bad version;
+ # otherwise, bisection is broken.
+ assert (
+ good_rev < bad_rev
+ ), "Bisection is broken because %d (good) is >= " "%d (bad)" % (
+ good_rev,
+ bad_rev,
+ )
+
+ # Find all revisions that are 'pending' within 'good_rev' and 'bad_rev'.
+ #
+ # NOTE: The intent is to not launch tryjobs between 'good_rev' and 'bad_rev'
+ # that have already been launched (this set is used when constructing the
+ # list of revisions to launch tryjobs for).
+ pending_revisions = {
+ tryjob["rev"]
+ for tryjob in tryjobs
+ if tryjob["status"] == update_tryjob_status.TryjobStatus.PENDING.value
+ and good_rev < tryjob["rev"] < bad_rev
+ }
+
+ # Find all revisions that are to be skipped within 'good_rev' and 'bad_rev'.
+ #
+ # NOTE: The intent is to not launch tryjobs between 'good_rev' and 'bad_rev'
+ # that have already been marked as 'skip' (this set is used when constructing
+ # the list of revisions to launch tryjobs for).
+ skip_revisions = {
+ tryjob["rev"]
+ for tryjob in tryjobs
+ if tryjob["status"] == update_tryjob_status.TryjobStatus.SKIP.value
+ and good_rev < tryjob["rev"] < bad_rev
+ }
+
+ return good_rev, bad_rev, pending_revisions, skip_revisions
+
+
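A self-contained sketch of the narrowing rule GetRemainingRange implements
(`narrow` is a stand-in name): the new bad bound is the smallest revision
marked bad, the new good bound is the largest marked good, and 'pending' or
'skip' revisions in between are carried along so they are not re-launched:

```
def narrow(start, end, tryjobs):
    bad = min([end] + [t["rev"] for t in tryjobs if t["status"] == "bad"])
    good = max([start] + [t["rev"] for t in tryjobs if t["status"] == "good"])
    assert good < bad, f"broken bisection: {good} (good) >= {bad} (bad)"
    pending = {
        t["rev"]
        for t in tryjobs
        if t["status"] == "pending" and good < t["rev"] < bad
    }
    skip = {
        t["rev"]
        for t in tryjobs
        if t["status"] == "skip" and good < t["rev"] < bad
    }
    return good, bad, pending, skip


print(
    narrow(
        100,
        150,
        [
            {"rev": 110, "status": "good"},
            {"rev": 120, "status": "good"},
            {"rev": 130, "status": "pending"},
            {"rev": 135, "status": "skip"},
            {"rev": 140, "status": "bad"},
        ],
    )
)  # -> (120, 140, {130}, {135})
```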
+def GetCommitsBetween(
+ start, end, parallel, src_path, pending_revisions, skip_revisions
+):
+ """Determines the revisions between start and end."""
+
+ with get_llvm_hash.LLVMHash().CreateTempDirectory() as temp_dir:
+ # We have guaranteed contiguous revision numbers after this,
+        # and that guarantee simplifies things considerably, so we don't
+ # support anything before it.
+ assert (
+ start >= git_llvm_rev.base_llvm_revision
+ ), f"{start} was too long ago"
+
+ with get_llvm_hash.CreateTempLLVMRepo(temp_dir) as new_repo:
+ if not src_path:
+ src_path = new_repo
+ index_step = (end - (start + 1)) // (parallel + 1)
+ if not index_step:
+ index_step = 1
+ revisions = [
+ rev
+ for rev in range(start + 1, end, index_step)
+ if rev not in pending_revisions and rev not in skip_revisions
+ ]
+ git_hashes = [
+ get_llvm_hash.GetGitHashFrom(src_path, rev) for rev in revisions
+ ]
+ return revisions, git_hashes
+
+
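The index_step computation above spaces candidate revisions evenly across the
remaining range, clamping the step to at least 1; a quick worked example:

```
start, end, parallel = 100, 110, 3
index_step = max((end - (start + 1)) // (parallel + 1), 1)
candidates = list(range(start + 1, end, index_step))
print(index_step, candidates)  # -> 2 [101, 103, 105, 107, 109]
```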
+def Bisect(
+ revisions,
+ git_hashes,
+ bisect_state,
+ last_tested,
+ update_packages,
+ chroot_path,
+ patch_metadata_file,
+ extra_change_lists,
+ options,
+ builder,
+ verbose,
+):
+ """Adds tryjobs and updates the status file with the new tryjobs."""
+
+ try:
+ for svn_revision, git_hash in zip(revisions, git_hashes):
+ tryjob_dict = modify_a_tryjob.AddTryjob(
+ update_packages,
+ git_hash,
+ svn_revision,
+ chroot_path,
+ patch_metadata_file,
+ extra_change_lists,
+ options,
+ builder,
+ verbose,
+ svn_revision,
+ )
+
+ bisect_state["jobs"].append(tryjob_dict)
+ finally:
+ # Do not want to lose progress if there is an exception.
+ if last_tested:
+ new_file = "%s.new" % last_tested
+ with open(new_file, "w") as json_file:
+ json.dump(
+ bisect_state, json_file, indent=4, separators=(",", ": ")
+ )
+
+ os.rename(new_file, last_tested)
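The finally block above uses a write-then-rename pattern so bisection progress
survives a crash mid-dump; a minimal sketch with a stand-in name
(`save_state_atomically`):

```
import json
import os


def save_state_atomically(path: str, state: dict) -> None:
    # Dump to a sibling file first; os.rename() then replaces the target in
    # one step (atomic on POSIX when both paths are on the same filesystem),
    # so the status file is never left half-written.
    new_file = "%s.new" % path
    with open(new_file, "w") as f:
        json.dump(state, f, indent=4, separators=(",", ": "))
    os.rename(new_file, path)
```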
def LoadStatusFile(last_tested, start, end):
- """Loads the status file for bisection."""
-
- try:
- with open(last_tested) as f:
- return json.load(f)
- except IOError as err:
- if err.errno != errno.ENOENT:
- raise
+ """Loads the status file for bisection."""
- return {'start': start, 'end': end, 'jobs': []}
-
-
-def main(args_output):
- """Bisects LLVM commits.
-
- Raises:
- AssertionError: The script was run inside the chroot.
- """
-
- chroot.VerifyOutsideChroot()
- patch_metadata_file = 'PATCHES.json'
- start = args_output.start_rev
- end = args_output.end_rev
-
- bisect_state = LoadStatusFile(args_output.last_tested, start, end)
- if start != bisect_state['start'] or end != bisect_state['end']:
- raise ValueError(
- f'The start {start} or the end {end} version provided is '
- f'different than "start" {bisect_state["start"]} or "end" '
- f'{bisect_state["end"]} in the .JSON file')
-
- # Pending and skipped revisions are between 'start_rev' and 'end_rev'.
- start_rev, end_rev, pending_revs, skip_revs = GetRemainingRange(
- start, end, bisect_state['jobs'])
-
- revisions, git_hashes = GetCommitsBetween(start_rev, end_rev,
- args_output.parallel,
- args_output.src_path, pending_revs,
- skip_revs)
-
- # No more revisions between 'start_rev' and 'end_rev', so
- # bisection is complete.
- #
- # This is determined by finding all valid revisions between 'start_rev'
- # and 'end_rev' and that are NOT in the 'pending' and 'skipped' set.
- if not revisions:
- if pending_revs:
- # Some tryjobs are not finished which may change the actual bad
- # commit/revision when those tryjobs are finished.
- no_revisions_message = (f'No revisions between start {start_rev} '
- f'and end {end_rev} to create tryjobs\n')
-
- if pending_revs:
- no_revisions_message += ('The following tryjobs are pending:\n' +
- '\n'.join(str(rev)
- for rev in pending_revs) + '\n')
-
- if skip_revs:
- no_revisions_message += ('The following tryjobs were skipped:\n' +
- '\n'.join(str(rev)
- for rev in skip_revs) + '\n')
-
- raise ValueError(no_revisions_message)
-
- print(f'Finished bisecting for {args_output.last_tested}')
- if args_output.src_path:
- bad_llvm_hash = get_llvm_hash.GetGitHashFrom(args_output.src_path,
- end_rev)
- else:
- bad_llvm_hash = get_llvm_hash.LLVMHash().GetLLVMHash(end_rev)
- print(f'The bad revision is {end_rev} and its commit hash is '
- f'{bad_llvm_hash}')
- if skip_revs:
- skip_revs_message = ('\nThe following revisions were skipped:\n' +
- '\n'.join(str(rev) for rev in skip_revs))
- print(skip_revs_message)
-
- if args_output.cleanup:
- # Abandon all the CLs created for bisection
- gerrit = os.path.join(args_output.chroot_path, 'chromite/bin/gerrit')
- for build in bisect_state['jobs']:
- try:
- subprocess.check_output(
- [gerrit, 'abandon', str(build['cl'])],
- stderr=subprocess.STDOUT,
- encoding='utf-8')
- except subprocess.CalledProcessError as err:
- # the CL may have been abandoned
- if 'chromite.lib.gob_util.GOBError' not in err.output:
+ try:
+ with open(last_tested) as f:
+ return json.load(f)
+ except IOError as err:
+ if err.errno != errno.ENOENT:
raise
- return BisectionExitStatus.BISECTION_COMPLETE.value
+ return {"start": start, "end": end, "jobs": []}
- for rev in revisions:
- if update_tryjob_status.FindTryjobIndex(rev,
- bisect_state['jobs']) is not None:
- raise ValueError(f'Revision {rev} exists already in "jobs"')
- Bisect(revisions, git_hashes, bisect_state, args_output.last_tested,
- update_chromeos_llvm_hash.DEFAULT_PACKAGES, args_output.chroot_path,
- patch_metadata_file, args_output.extra_change_lists,
- args_output.options, args_output.builder, args_output.verbose)
-
-
-if __name__ == '__main__':
- sys.exit(main(GetCommandLineArgs()))
+def main(args_output):
+ """Bisects LLVM commits.
+
+ Raises:
+ AssertionError: The script was run inside the chroot.
+ """
+
+ chroot.VerifyOutsideChroot()
+ patch_metadata_file = "PATCHES.json"
+ start = args_output.start_rev
+ end = args_output.end_rev
+
+ bisect_state = LoadStatusFile(args_output.last_tested, start, end)
+ if start != bisect_state["start"] or end != bisect_state["end"]:
+ raise ValueError(
+ f"The start {start} or the end {end} version provided is "
+ f'different than "start" {bisect_state["start"]} or "end" '
+ f'{bisect_state["end"]} in the .JSON file'
+ )
+
+ # Pending and skipped revisions are between 'start_rev' and 'end_rev'.
+ start_rev, end_rev, pending_revs, skip_revs = GetRemainingRange(
+ start, end, bisect_state["jobs"]
+ )
+
+ revisions, git_hashes = GetCommitsBetween(
+ start_rev,
+ end_rev,
+ args_output.parallel,
+ args_output.src_path,
+ pending_revs,
+ skip_revs,
+ )
+
+ # No more revisions between 'start_rev' and 'end_rev', so
+ # bisection is complete.
+ #
+ # This is determined by finding all valid revisions between 'start_rev'
+ # and 'end_rev' and that are NOT in the 'pending' and 'skipped' set.
+ if not revisions:
+ if pending_revs:
+            # Some tryjobs are not finished, which may change the actual
+            # bad commit/revision once those tryjobs complete.
+ no_revisions_message = (
+ f"No revisions between start {start_rev} "
+ f"and end {end_rev} to create tryjobs\n"
+ )
+
+ if pending_revs:
+ no_revisions_message += (
+ "The following tryjobs are pending:\n"
+ + "\n".join(str(rev) for rev in pending_revs)
+ + "\n"
+ )
+
+ if skip_revs:
+ no_revisions_message += (
+ "The following tryjobs were skipped:\n"
+ + "\n".join(str(rev) for rev in skip_revs)
+ + "\n"
+ )
+
+ raise ValueError(no_revisions_message)
+
+ print(f"Finished bisecting for {args_output.last_tested}")
+ if args_output.src_path:
+ bad_llvm_hash = get_llvm_hash.GetGitHashFrom(
+ args_output.src_path, end_rev
+ )
+ else:
+ bad_llvm_hash = get_llvm_hash.LLVMHash().GetLLVMHash(end_rev)
+ print(
+ f"The bad revision is {end_rev} and its commit hash is "
+ f"{bad_llvm_hash}"
+ )
+ if skip_revs:
+ skip_revs_message = (
+ "\nThe following revisions were skipped:\n"
+ + "\n".join(str(rev) for rev in skip_revs)
+ )
+ print(skip_revs_message)
+
+ if args_output.cleanup:
+ # Abandon all the CLs created for bisection
+ gerrit = os.path.join(
+ args_output.chroot_path, "chromite/bin/gerrit"
+ )
+ for build in bisect_state["jobs"]:
+ try:
+ subprocess.check_output(
+ [gerrit, "abandon", str(build["cl"])],
+ stderr=subprocess.STDOUT,
+ encoding="utf-8",
+ )
+ except subprocess.CalledProcessError as err:
+                    # The CL may already have been abandoned.
+ if "chromite.lib.gob_util.GOBError" not in err.output:
+ raise
+
+ return BisectionExitStatus.BISECTION_COMPLETE.value
+
+ for rev in revisions:
+ if (
+ update_tryjob_status.FindTryjobIndex(rev, bisect_state["jobs"])
+ is not None
+ ):
+            raise ValueError(f'Revision {rev} already exists in "jobs"')
+
+ Bisect(
+ revisions,
+ git_hashes,
+ bisect_state,
+ args_output.last_tested,
+ update_chromeos_llvm_hash.DEFAULT_PACKAGES,
+ args_output.chroot_path,
+ patch_metadata_file,
+ args_output.extra_change_lists,
+ args_output.options,
+ args_output.builder,
+ args_output.verbose,
+ )
+
+
+if __name__ == "__main__":
+ sys.exit(main(GetCommandLineArgs()))
diff --git a/llvm_tools/llvm_bisection_unittest.py b/llvm_tools/llvm_bisection_unittest.py
index cc22dfa4..1e86a678 100755
--- a/llvm_tools/llvm_bisection_unittest.py
+++ b/llvm_tools/llvm_bisection_unittest.py
@@ -1,6 +1,6 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
-# Copyright 2019 The Chromium OS Authors. All rights reserved.
+# Copyright 2019 The ChromiumOS Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
@@ -8,7 +8,6 @@
"""Tests for LLVM bisection."""
-from __future__ import print_function
import json
import os
@@ -25,485 +24,562 @@ import test_helpers
class LLVMBisectionTest(unittest.TestCase):
- """Unittests for LLVM bisection."""
-
- def testGetRemainingRangePassed(self):
- start = 100
- end = 150
-
- test_tryjobs = [{
- 'rev': 110,
- 'status': 'good',
- 'link': 'https://some_tryjob_1_url.com'
- }, {
- 'rev': 120,
- 'status': 'good',
- 'link': 'https://some_tryjob_2_url.com'
- }, {
- 'rev': 130,
- 'status': 'pending',
- 'link': 'https://some_tryjob_3_url.com'
- }, {
- 'rev': 135,
- 'status': 'skip',
- 'link': 'https://some_tryjob_4_url.com'
- }, {
- 'rev': 140,
- 'status': 'bad',
- 'link': 'https://some_tryjob_5_url.com'
- }]
-
- # Tuple consists of the new good revision, the new bad revision, a set of
- # 'pending' revisions, and a set of 'skip' revisions.
- expected_revisions_tuple = 120, 140, {130}, {135}
-
- self.assertEqual(
- llvm_bisection.GetRemainingRange(start, end, test_tryjobs),
- expected_revisions_tuple)
-
- def testGetRemainingRangeFailedWithMissingStatus(self):
- start = 100
- end = 150
-
- test_tryjobs = [{
- 'rev': 105,
- 'status': 'good',
- 'link': 'https://some_tryjob_1_url.com'
- }, {
- 'rev': 120,
- 'status': None,
- 'link': 'https://some_tryjob_2_url.com'
- }, {
- 'rev': 140,
- 'status': 'bad',
- 'link': 'https://some_tryjob_3_url.com'
- }]
-
- with self.assertRaises(ValueError) as err:
- llvm_bisection.GetRemainingRange(start, end, test_tryjobs)
-
- error_message = ('"status" is missing or has no value, please '
- 'go to %s and update it' % test_tryjobs[1]['link'])
- self.assertEqual(str(err.exception), error_message)
-
- def testGetRemainingRangeFailedWithInvalidRange(self):
- start = 100
- end = 150
-
- test_tryjobs = [{
- 'rev': 110,
- 'status': 'bad',
- 'link': 'https://some_tryjob_1_url.com'
- }, {
- 'rev': 125,
- 'status': 'skip',
- 'link': 'https://some_tryjob_2_url.com'
- }, {
- 'rev': 140,
- 'status': 'good',
- 'link': 'https://some_tryjob_3_url.com'
- }]
-
- with self.assertRaises(AssertionError) as err:
- llvm_bisection.GetRemainingRange(start, end, test_tryjobs)
-
- expected_error_message = ('Bisection is broken because %d (good) is >= '
- '%d (bad)' %
- (test_tryjobs[2]['rev'], test_tryjobs[0]['rev']))
-
- self.assertEqual(str(err.exception), expected_error_message)
-
- @mock.patch.object(get_llvm_hash, 'GetGitHashFrom')
- def testGetCommitsBetweenPassed(self, mock_get_git_hash):
- start = git_llvm_rev.base_llvm_revision
- end = start + 10
- test_pending_revisions = {start + 7}
- test_skip_revisions = {
- start + 1, start + 2, start + 4, start + 8, start + 9
- }
- parallel = 3
- abs_path_to_src = '/abs/path/to/src'
-
- revs = ['a123testhash3', 'a123testhash5']
- mock_get_git_hash.side_effect = revs
-
- git_hashes = [
- git_llvm_rev.base_llvm_revision + 3, git_llvm_rev.base_llvm_revision + 5
- ]
-
- self.assertEqual(
- llvm_bisection.GetCommitsBetween(start, end, parallel, abs_path_to_src,
- test_pending_revisions,
- test_skip_revisions),
- (git_hashes, revs))
-
- def testLoadStatusFilePassedWithExistingFile(self):
- start = 100
- end = 150
-
- test_bisect_state = {'start': start, 'end': end, 'jobs': []}
-
- # Simulate that the status file exists.
- with test_helpers.CreateTemporaryJsonFile() as temp_json_file:
- with open(temp_json_file, 'w') as f:
- test_helpers.WritePrettyJsonFile(test_bisect_state, f)
-
- self.assertEqual(
- llvm_bisection.LoadStatusFile(temp_json_file, start, end),
- test_bisect_state)
-
- def testLoadStatusFilePassedWithoutExistingFile(self):
- start = 200
- end = 250
-
- expected_bisect_state = {'start': start, 'end': end, 'jobs': []}
-
- last_tested = '/abs/path/to/file_that_does_not_exist.json'
-
- self.assertEqual(
- llvm_bisection.LoadStatusFile(last_tested, start, end),
- expected_bisect_state)
-
- @mock.patch.object(modify_a_tryjob, 'AddTryjob')
- def testBisectPassed(self, mock_add_tryjob):
-
- git_hash_list = ['a123testhash1', 'a123testhash2', 'a123testhash3']
- revisions_list = [102, 104, 106]
-
- # Simulate behavior of `AddTryjob()` when successfully launched a tryjob for
- # the updated packages.
- @test_helpers.CallCountsToMockFunctions
- def MockAddTryjob(call_count, _packages, _git_hash, _revision, _chroot_path,
- _patch_file, _extra_cls, _options, _builder, _verbose,
- _svn_revision):
-
- if call_count < 2:
- return {'rev': revisions_list[call_count], 'status': 'pending'}
-
- # Simulate an exception happened along the way when updating the
- # packages' `LLVM_NEXT_HASH`.
- if call_count == 2:
- raise ValueError('Unable to launch tryjob')
-
- assert False, 'Called `AddTryjob()` more than expected.'
-
- # Use the test function to simulate `AddTryjob()`.
- mock_add_tryjob.side_effect = MockAddTryjob
-
- start = 100
- end = 110
-
- bisection_contents = {'start': start, 'end': end, 'jobs': []}
-
- args_output = test_helpers.ArgsOutputTest()
-
- packages = ['sys-devel/llvm']
- patch_file = '/abs/path/to/PATCHES.json'
-
- # Create a temporary .JSON file to simulate a status file for bisection.
- with test_helpers.CreateTemporaryJsonFile() as temp_json_file:
- with open(temp_json_file, 'w') as f:
- test_helpers.WritePrettyJsonFile(bisection_contents, f)
-
- # Verify that the status file is updated when an exception happened when
- # attempting to launch a revision (i.e. progress is not lost).
- with self.assertRaises(ValueError) as err:
- llvm_bisection.Bisect(revisions_list, git_hash_list, bisection_contents,
- temp_json_file, packages, args_output.chroot_path,
- patch_file, args_output.extra_change_lists,
- args_output.options, args_output.builders,
- args_output.verbose)
-
- expected_bisection_contents = {
- 'start':
- start,
- 'end':
- end,
- 'jobs': [{
- 'rev': revisions_list[0],
- 'status': 'pending'
- }, {
- 'rev': revisions_list[1],
- 'status': 'pending'
- }]
- }
-
- # Verify that the launched tryjobs were added to the status file when
- # an exception happened.
- with open(temp_json_file) as f:
- json_contents = json.load(f)
-
- self.assertEqual(json_contents, expected_bisection_contents)
-
- self.assertEqual(str(err.exception), 'Unable to launch tryjob')
-
- self.assertEqual(mock_add_tryjob.call_count, 3)
-
- @mock.patch.object(subprocess, 'check_output', return_value=None)
- @mock.patch.object(
- get_llvm_hash.LLVMHash, 'GetLLVMHash', return_value='a123testhash4')
- @mock.patch.object(llvm_bisection, 'GetCommitsBetween')
- @mock.patch.object(llvm_bisection, 'GetRemainingRange')
- @mock.patch.object(llvm_bisection, 'LoadStatusFile')
- @mock.patch.object(chroot, 'VerifyOutsideChroot', return_value=True)
- def testMainPassed(self, mock_outside_chroot, mock_load_status_file,
- mock_get_range, mock_get_revision_and_hash_list,
- _mock_get_bad_llvm_hash, mock_abandon_cl):
-
- start = 500
- end = 502
- cl = 1
-
- bisect_state = {
- 'start': start,
- 'end': end,
- 'jobs': [{
- 'rev': 501,
- 'status': 'bad',
- 'cl': cl
- }]
- }
-
- skip_revisions = {501}
- pending_revisions = {}
-
- mock_load_status_file.return_value = bisect_state
-
- mock_get_range.return_value = (start, end, pending_revisions,
- skip_revisions)
-
- mock_get_revision_and_hash_list.return_value = [], []
-
- args_output = test_helpers.ArgsOutputTest()
- args_output.start_rev = start
- args_output.end_rev = end
- args_output.parallel = 3
- args_output.src_path = None
- args_output.chroot_path = 'somepath'
- args_output.cleanup = True
-
- self.assertEqual(
- llvm_bisection.main(args_output),
- llvm_bisection.BisectionExitStatus.BISECTION_COMPLETE.value)
-
- mock_outside_chroot.assert_called_once()
-
- mock_load_status_file.assert_called_once()
-
- mock_get_range.assert_called_once()
-
- mock_get_revision_and_hash_list.assert_called_once()
-
- mock_abandon_cl.assert_called_once()
- self.assertEqual(
- mock_abandon_cl.call_args,
- mock.call(
- [
- os.path.join(args_output.chroot_path, 'chromite/bin/gerrit'),
- 'abandon',
- str(cl),
- ],
- stderr=subprocess.STDOUT,
- encoding='utf-8',
- ))
-
- @mock.patch.object(llvm_bisection, 'LoadStatusFile')
- @mock.patch.object(chroot, 'VerifyOutsideChroot', return_value=True)
- def testMainFailedWithInvalidRange(self, mock_outside_chroot,
- mock_load_status_file):
-
- start = 500
- end = 502
-
- bisect_state = {
- 'start': start - 1,
- 'end': end,
- }
-
- mock_load_status_file.return_value = bisect_state
-
- args_output = test_helpers.ArgsOutputTest()
- args_output.start_rev = start
- args_output.end_rev = end
- args_output.parallel = 3
- args_output.src_path = None
+ """Unittests for LLVM bisection."""
+
+ def testGetRemainingRangePassed(self):
+ start = 100
+ end = 150
+
+ test_tryjobs = [
+ {
+ "rev": 110,
+ "status": "good",
+ "link": "https://some_tryjob_1_url.com",
+ },
+ {
+ "rev": 120,
+ "status": "good",
+ "link": "https://some_tryjob_2_url.com",
+ },
+ {
+ "rev": 130,
+ "status": "pending",
+ "link": "https://some_tryjob_3_url.com",
+ },
+ {
+ "rev": 135,
+ "status": "skip",
+ "link": "https://some_tryjob_4_url.com",
+ },
+ {
+ "rev": 140,
+ "status": "bad",
+ "link": "https://some_tryjob_5_url.com",
+ },
+ ]
+
+ # Tuple consists of the new good revision, the new bad revision, a set of
+ # 'pending' revisions, and a set of 'skip' revisions.
+ expected_revisions_tuple = 120, 140, {130}, {135}
+
+ self.assertEqual(
+ llvm_bisection.GetRemainingRange(start, end, test_tryjobs),
+ expected_revisions_tuple,
+ )
+
+ def testGetRemainingRangeFailedWithMissingStatus(self):
+ start = 100
+ end = 150
+
+ test_tryjobs = [
+ {
+ "rev": 105,
+ "status": "good",
+ "link": "https://some_tryjob_1_url.com",
+ },
+ {
+ "rev": 120,
+ "status": None,
+ "link": "https://some_tryjob_2_url.com",
+ },
+ {
+ "rev": 140,
+ "status": "bad",
+ "link": "https://some_tryjob_3_url.com",
+ },
+ ]
+
+ with self.assertRaises(ValueError) as err:
+ llvm_bisection.GetRemainingRange(start, end, test_tryjobs)
+
+ error_message = (
+ '"status" is missing or has no value, please '
+ "go to %s and update it" % test_tryjobs[1]["link"]
+ )
+ self.assertEqual(str(err.exception), error_message)
+
+ def testGetRemainingRangeFailedWithInvalidRange(self):
+ start = 100
+ end = 150
+
+ test_tryjobs = [
+ {
+ "rev": 110,
+ "status": "bad",
+ "link": "https://some_tryjob_1_url.com",
+ },
+ {
+ "rev": 125,
+ "status": "skip",
+ "link": "https://some_tryjob_2_url.com",
+ },
+ {
+ "rev": 140,
+ "status": "good",
+ "link": "https://some_tryjob_3_url.com",
+ },
+ ]
+
+ with self.assertRaises(AssertionError) as err:
+ llvm_bisection.GetRemainingRange(start, end, test_tryjobs)
+
+ expected_error_message = (
+ "Bisection is broken because %d (good) is >= "
+ "%d (bad)" % (test_tryjobs[2]["rev"], test_tryjobs[0]["rev"])
+ )
+
+ self.assertEqual(str(err.exception), expected_error_message)
+
+ @mock.patch.object(get_llvm_hash, "GetGitHashFrom")
+ def testGetCommitsBetweenPassed(self, mock_get_git_hash):
+ start = git_llvm_rev.base_llvm_revision
+ end = start + 10
+ test_pending_revisions = {start + 7}
+ test_skip_revisions = {
+ start + 1,
+ start + 2,
+ start + 4,
+ start + 8,
+ start + 9,
+ }
+ parallel = 3
+ abs_path_to_src = "/abs/path/to/src"
+
+ revs = ["a123testhash3", "a123testhash5"]
+ mock_get_git_hash.side_effect = revs
+
+ git_hashes = [
+ git_llvm_rev.base_llvm_revision + 3,
+ git_llvm_rev.base_llvm_revision + 5,
+ ]
+
+ self.assertEqual(
+ llvm_bisection.GetCommitsBetween(
+ start,
+ end,
+ parallel,
+ abs_path_to_src,
+ test_pending_revisions,
+ test_skip_revisions,
+ ),
+            (revisions, git_hashes),
+ )
+
+ def testLoadStatusFilePassedWithExistingFile(self):
+ start = 100
+ end = 150
+
+ test_bisect_state = {"start": start, "end": end, "jobs": []}
+
+ # Simulate that the status file exists.
+ with test_helpers.CreateTemporaryJsonFile() as temp_json_file:
+ with open(temp_json_file, "w") as f:
+ test_helpers.WritePrettyJsonFile(test_bisect_state, f)
+
+ self.assertEqual(
+ llvm_bisection.LoadStatusFile(temp_json_file, start, end),
+ test_bisect_state,
+ )
+
+ def testLoadStatusFilePassedWithoutExistingFile(self):
+ start = 200
+ end = 250
+
+ expected_bisect_state = {"start": start, "end": end, "jobs": []}
+
+ last_tested = "/abs/path/to/file_that_does_not_exist.json"
+
+ self.assertEqual(
+ llvm_bisection.LoadStatusFile(last_tested, start, end),
+ expected_bisect_state,
+ )
+
+ @mock.patch.object(modify_a_tryjob, "AddTryjob")
+ def testBisectPassed(self, mock_add_tryjob):
+
+ git_hash_list = ["a123testhash1", "a123testhash2", "a123testhash3"]
+ revisions_list = [102, 104, 106]
+
+ # Simulate behavior of `AddTryjob()` when successfully launched a tryjob for
+ # the updated packages.
+ @test_helpers.CallCountsToMockFunctions
+ def MockAddTryjob(
+ call_count,
+ _packages,
+ _git_hash,
+ _revision,
+ _chroot_path,
+ _patch_file,
+ _extra_cls,
+ _options,
+ _builder,
+ _verbose,
+ _svn_revision,
+ ):
+
+ if call_count < 2:
+ return {"rev": revisions_list[call_count], "status": "pending"}
+
+ # Simulate an exception happened along the way when updating the
+ # packages' `LLVM_NEXT_HASH`.
+ if call_count == 2:
+ raise ValueError("Unable to launch tryjob")
+
+ assert False, "Called `AddTryjob()` more than expected."
+
+ # Use the test function to simulate `AddTryjob()`.
+ mock_add_tryjob.side_effect = MockAddTryjob
+
+ start = 100
+ end = 110
+
+ bisection_contents = {"start": start, "end": end, "jobs": []}
+
+ args_output = test_helpers.ArgsOutputTest()
+
+ packages = ["sys-devel/llvm"]
+ patch_file = "/abs/path/to/PATCHES.json"
+
+ # Create a temporary .JSON file to simulate a status file for bisection.
+ with test_helpers.CreateTemporaryJsonFile() as temp_json_file:
+ with open(temp_json_file, "w") as f:
+ test_helpers.WritePrettyJsonFile(bisection_contents, f)
+
+ # Verify that the status file is updated when an exception happened when
+ # attempting to launch a revision (i.e. progress is not lost).
+ with self.assertRaises(ValueError) as err:
+ llvm_bisection.Bisect(
+ revisions_list,
+ git_hash_list,
+ bisection_contents,
+ temp_json_file,
+ packages,
+ args_output.chroot_path,
+ patch_file,
+ args_output.extra_change_lists,
+ args_output.options,
+ args_output.builders,
+ args_output.verbose,
+ )
+
+ expected_bisection_contents = {
+ "start": start,
+ "end": end,
+ "jobs": [
+ {"rev": revisions_list[0], "status": "pending"},
+ {"rev": revisions_list[1], "status": "pending"},
+ ],
+ }
+
+ # Verify that the launched tryjobs were added to the status file when
+ # an exception happened.
+ with open(temp_json_file) as f:
+ json_contents = json.load(f)
+
+ self.assertEqual(json_contents, expected_bisection_contents)
+
+ self.assertEqual(str(err.exception), "Unable to launch tryjob")
+
+ self.assertEqual(mock_add_tryjob.call_count, 3)
+
+ @mock.patch.object(subprocess, "check_output", return_value=None)
+ @mock.patch.object(
+ get_llvm_hash.LLVMHash, "GetLLVMHash", return_value="a123testhash4"
+ )
+ @mock.patch.object(llvm_bisection, "GetCommitsBetween")
+ @mock.patch.object(llvm_bisection, "GetRemainingRange")
+ @mock.patch.object(llvm_bisection, "LoadStatusFile")
+ @mock.patch.object(chroot, "VerifyOutsideChroot", return_value=True)
+ def testMainPassed(
+ self,
+ mock_outside_chroot,
+ mock_load_status_file,
+ mock_get_range,
+ mock_get_revision_and_hash_list,
+ _mock_get_bad_llvm_hash,
+ mock_abandon_cl,
+ ):
+
+ start = 500
+ end = 502
+ cl = 1
+
+ bisect_state = {
+ "start": start,
+ "end": end,
+ "jobs": [{"rev": 501, "status": "bad", "cl": cl}],
+ }
+
+ skip_revisions = {501}
+ pending_revisions = {}
+
+ mock_load_status_file.return_value = bisect_state
+
+ mock_get_range.return_value = (
+ start,
+ end,
+ pending_revisions,
+ skip_revisions,
+ )
+
+ mock_get_revision_and_hash_list.return_value = [], []
+
+ args_output = test_helpers.ArgsOutputTest()
+ args_output.start_rev = start
+ args_output.end_rev = end
+ args_output.parallel = 3
+ args_output.src_path = None
+ args_output.chroot_path = "somepath"
+ args_output.cleanup = True
+
+ self.assertEqual(
+ llvm_bisection.main(args_output),
+ llvm_bisection.BisectionExitStatus.BISECTION_COMPLETE.value,
+ )
+
+ mock_outside_chroot.assert_called_once()
+
+ mock_load_status_file.assert_called_once()
+
+ mock_get_range.assert_called_once()
+
+ mock_get_revision_and_hash_list.assert_called_once()
+
+ mock_abandon_cl.assert_called_once()
+ self.assertEqual(
+ mock_abandon_cl.call_args,
+ mock.call(
+ [
+ os.path.join(
+ args_output.chroot_path, "chromite/bin/gerrit"
+ ),
+ "abandon",
+ str(cl),
+ ],
+ stderr=subprocess.STDOUT,
+ encoding="utf-8",
+ ),
+ )
+
+ @mock.patch.object(llvm_bisection, "LoadStatusFile")
+ @mock.patch.object(chroot, "VerifyOutsideChroot", return_value=True)
+ def testMainFailedWithInvalidRange(
+ self, mock_outside_chroot, mock_load_status_file
+ ):
+
+ start = 500
+ end = 502
+
+ bisect_state = {
+ "start": start - 1,
+ "end": end,
+ }
+
+ mock_load_status_file.return_value = bisect_state
+
+ args_output = test_helpers.ArgsOutputTest()
+ args_output.start_rev = start
+ args_output.end_rev = end
+ args_output.parallel = 3
+ args_output.src_path = None
+
+ with self.assertRaises(ValueError) as err:
+ llvm_bisection.main(args_output)
+
+ error_message = (
+ f"The start {start} or the end {end} version provided is "
+ f'different than "start" {bisect_state["start"]} or "end" '
+ f'{bisect_state["end"]} in the .JSON file'
+ )
+
+ self.assertEqual(str(err.exception), error_message)
+
+ mock_outside_chroot.assert_called_once()
+
+ mock_load_status_file.assert_called_once()
+
+ @mock.patch.object(llvm_bisection, "GetCommitsBetween")
+ @mock.patch.object(llvm_bisection, "GetRemainingRange")
+ @mock.patch.object(llvm_bisection, "LoadStatusFile")
+ @mock.patch.object(chroot, "VerifyOutsideChroot", return_value=True)
+ def testMainFailedWithPendingBuilds(
+ self,
+ mock_outside_chroot,
+ mock_load_status_file,
+ mock_get_range,
+ mock_get_revision_and_hash_list,
+ ):
+
+ start = 500
+ end = 502
+ rev = 501
+
+ bisect_state = {
+ "start": start,
+ "end": end,
+ "jobs": [{"rev": rev, "status": "pending"}],
+ }
+
+ skip_revisions = {}
+ pending_revisions = {rev}
+
+ mock_load_status_file.return_value = bisect_state
+
+ mock_get_range.return_value = (
+ start,
+ end,
+ pending_revisions,
+ skip_revisions,
+ )
+
+ mock_get_revision_and_hash_list.return_value = [], []
+
+ args_output = test_helpers.ArgsOutputTest()
+ args_output.start_rev = start
+ args_output.end_rev = end
+ args_output.parallel = 3
+ args_output.src_path = None
+
+ with self.assertRaises(ValueError) as err:
+ llvm_bisection.main(args_output)
+
+ error_message = (
+ f"No revisions between start {start} and end {end} to "
+ "create tryjobs\nThe following tryjobs are pending:\n"
+ f"{rev}\n"
+ )
+
+ self.assertEqual(str(err.exception), error_message)
+
+ mock_outside_chroot.assert_called_once()
+
+ mock_load_status_file.assert_called_once()
+
+ mock_get_range.assert_called_once()
+
+ mock_get_revision_and_hash_list.assert_called_once()
+
+ @mock.patch.object(llvm_bisection, "GetCommitsBetween")
+ @mock.patch.object(llvm_bisection, "GetRemainingRange")
+ @mock.patch.object(llvm_bisection, "LoadStatusFile")
+ @mock.patch.object(chroot, "VerifyOutsideChroot", return_value=True)
+ def testMainFailedWithDuplicateBuilds(
+ self,
+ mock_outside_chroot,
+ mock_load_status_file,
+ mock_get_range,
+ mock_get_revision_and_hash_list,
+ ):
+
+ start = 500
+ end = 502
+ rev = 501
+ git_hash = "a123testhash1"
+
+ bisect_state = {
+ "start": start,
+ "end": end,
+ "jobs": [{"rev": rev, "status": "pending"}],
+ }
- with self.assertRaises(ValueError) as err:
- llvm_bisection.main(args_output)
+ skip_revisions = {}
+ pending_revisions = {rev}
- error_message = (f'The start {start} or the end {end} version provided is '
- f'different than "start" {bisect_state["start"]} or "end" '
- f'{bisect_state["end"]} in the .JSON file')
+ mock_load_status_file.return_value = bisect_state
+
+ mock_get_range.return_value = (
+ start,
+ end,
+ pending_revisions,
+ skip_revisions,
+ )
- self.assertEqual(str(err.exception), error_message)
+ mock_get_revision_and_hash_list.return_value = [rev], [git_hash]
+
+ args_output = test_helpers.ArgsOutputTest()
+ args_output.start_rev = start
+ args_output.end_rev = end
+ args_output.parallel = 3
+ args_output.src_path = None
+
+ with self.assertRaises(ValueError) as err:
+ llvm_bisection.main(args_output)
- mock_outside_chroot.assert_called_once()
+ error_message = 'Revision %d exists already in "jobs"' % rev
+ self.assertEqual(str(err.exception), error_message)
- mock_load_status_file.assert_called_once()
+ mock_outside_chroot.assert_called_once()
- @mock.patch.object(llvm_bisection, 'GetCommitsBetween')
- @mock.patch.object(llvm_bisection, 'GetRemainingRange')
- @mock.patch.object(llvm_bisection, 'LoadStatusFile')
- @mock.patch.object(chroot, 'VerifyOutsideChroot', return_value=True)
- def testMainFailedWithPendingBuilds(self, mock_outside_chroot,
- mock_load_status_file, mock_get_range,
- mock_get_revision_and_hash_list):
+ mock_load_status_file.assert_called_once()
- start = 500
- end = 502
- rev = 501
+ mock_get_range.assert_called_once()
- bisect_state = {
- 'start': start,
- 'end': end,
- 'jobs': [{
- 'rev': rev,
- 'status': 'pending'
- }]
- }
+ mock_get_revision_and_hash_list.assert_called_once()
- skip_revisions = {}
- pending_revisions = {rev}
+ @mock.patch.object(subprocess, "check_output", return_value=None)
+ @mock.patch.object(
+ get_llvm_hash.LLVMHash, "GetLLVMHash", return_value="a123testhash4"
+ )
+ @mock.patch.object(llvm_bisection, "GetCommitsBetween")
+ @mock.patch.object(llvm_bisection, "GetRemainingRange")
+ @mock.patch.object(llvm_bisection, "LoadStatusFile")
+ @mock.patch.object(chroot, "VerifyOutsideChroot", return_value=True)
+ def testMainFailedToAbandonCL(
+ self,
+ mock_outside_chroot,
+ mock_load_status_file,
+ mock_get_range,
+ mock_get_revision_and_hash_list,
+ _mock_get_bad_llvm_hash,
+ mock_abandon_cl,
+ ):
- mock_load_status_file.return_value = bisect_state
+ start = 500
+ end = 502
- mock_get_range.return_value = (start, end, pending_revisions,
- skip_revisions)
+ bisect_state = {
+ "start": start,
+ "end": end,
+ "jobs": [{"rev": 501, "status": "bad", "cl": 0}],
+ }
- mock_get_revision_and_hash_list.return_value = [], []
+ skip_revisions = {501}
+ pending_revisions = {}
- args_output = test_helpers.ArgsOutputTest()
- args_output.start_rev = start
- args_output.end_rev = end
- args_output.parallel = 3
- args_output.src_path = None
+ mock_load_status_file.return_value = bisect_state
- with self.assertRaises(ValueError) as err:
- llvm_bisection.main(args_output)
+ mock_get_range.return_value = (
+ start,
+ end,
+ pending_revisions,
+ skip_revisions,
+ )
- error_message = (f'No revisions between start {start} and end {end} to '
- 'create tryjobs\nThe following tryjobs are pending:\n'
- f'{rev}\n')
+ mock_get_revision_and_hash_list.return_value = ([], [])
- self.assertEqual(str(err.exception), error_message)
+ error_message = "Error message."
+ mock_abandon_cl.side_effect = subprocess.CalledProcessError(
+ returncode=1, cmd=[], output=error_message
+ )
+
+ args_output = test_helpers.ArgsOutputTest()
+ args_output.start_rev = start
+ args_output.end_rev = end
+ args_output.parallel = 3
+ args_output.src_path = None
+ args_output.cleanup = True
- mock_outside_chroot.assert_called_once()
+ with self.assertRaises(subprocess.CalledProcessError) as err:
+ llvm_bisection.main(args_output)
- mock_load_status_file.assert_called_once()
+ self.assertEqual(err.exception.output, error_message)
- mock_get_range.assert_called_once()
+ mock_outside_chroot.assert_called_once()
- mock_get_revision_and_hash_list.assert_called_once()
-
- @mock.patch.object(llvm_bisection, 'GetCommitsBetween')
- @mock.patch.object(llvm_bisection, 'GetRemainingRange')
- @mock.patch.object(llvm_bisection, 'LoadStatusFile')
- @mock.patch.object(chroot, 'VerifyOutsideChroot', return_value=True)
- def testMainFailedWithDuplicateBuilds(self, mock_outside_chroot,
- mock_load_status_file, mock_get_range,
- mock_get_revision_and_hash_list):
+ mock_load_status_file.assert_called_once()
- start = 500
- end = 502
- rev = 501
- git_hash = 'a123testhash1'
+ mock_get_range.assert_called_once()
- bisect_state = {
- 'start': start,
- 'end': end,
- 'jobs': [{
- 'rev': rev,
- 'status': 'pending'
- }]
- }
- skip_revisions = {}
- pending_revisions = {rev}
-
- mock_load_status_file.return_value = bisect_state
-
- mock_get_range.return_value = (start, end, pending_revisions,
- skip_revisions)
-
- mock_get_revision_and_hash_list.return_value = [rev], [git_hash]
-
- args_output = test_helpers.ArgsOutputTest()
- args_output.start_rev = start
- args_output.end_rev = end
- args_output.parallel = 3
- args_output.src_path = None
-
- with self.assertRaises(ValueError) as err:
- llvm_bisection.main(args_output)
-
- error_message = ('Revision %d exists already in "jobs"' % rev)
- self.assertEqual(str(err.exception), error_message)
-
- mock_outside_chroot.assert_called_once()
-
- mock_load_status_file.assert_called_once()
-
- mock_get_range.assert_called_once()
-
- mock_get_revision_and_hash_list.assert_called_once()
-
- @mock.patch.object(subprocess, 'check_output', return_value=None)
- @mock.patch.object(
- get_llvm_hash.LLVMHash, 'GetLLVMHash', return_value='a123testhash4')
- @mock.patch.object(llvm_bisection, 'GetCommitsBetween')
- @mock.patch.object(llvm_bisection, 'GetRemainingRange')
- @mock.patch.object(llvm_bisection, 'LoadStatusFile')
- @mock.patch.object(chroot, 'VerifyOutsideChroot', return_value=True)
- def testMainFailedToAbandonCL(self, mock_outside_chroot,
- mock_load_status_file, mock_get_range,
- mock_get_revision_and_hash_list,
- _mock_get_bad_llvm_hash, mock_abandon_cl):
-
- start = 500
- end = 502
-
- bisect_state = {
- 'start': start,
- 'end': end,
- 'jobs': [{
- 'rev': 501,
- 'status': 'bad',
- 'cl': 0
- }]
- }
-
- skip_revisions = {501}
- pending_revisions = {}
-
- mock_load_status_file.return_value = bisect_state
-
- mock_get_range.return_value = (start, end, pending_revisions,
- skip_revisions)
-
- mock_get_revision_and_hash_list.return_value = ([], [])
-
- error_message = 'Error message.'
- mock_abandon_cl.side_effect = subprocess.CalledProcessError(
- returncode=1, cmd=[], output=error_message)
-
- args_output = test_helpers.ArgsOutputTest()
- args_output.start_rev = start
- args_output.end_rev = end
- args_output.parallel = 3
- args_output.src_path = None
- args_output.cleanup = True
-
- with self.assertRaises(subprocess.CalledProcessError) as err:
- llvm_bisection.main(args_output)
-
- self.assertEqual(err.exception.output, error_message)
-
- mock_outside_chroot.assert_called_once()
-
- mock_load_status_file.assert_called_once()
-
- mock_get_range.assert_called_once()
-
-
-if __name__ == '__main__':
- unittest.main()
+if __name__ == "__main__":
+ unittest.main()
diff --git a/llvm_tools/llvm_local_bisection.sh b/llvm_tools/llvm_local_bisection.sh
new file mode 100755
index 00000000..e319080c
--- /dev/null
+++ b/llvm_tools/llvm_local_bisection.sh
@@ -0,0 +1,109 @@
+#!/bin/bash -u
+# -*- coding: utf-8 -*-
+# Copyright 2022 The ChromiumOS Authors
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# llvm_local_bisection.sh
+#
+# This script is meant to be run inside a `git bisect` process, like so:
+#
+# $ cd <your llvm-project dir>
+# $ git bisect start
+# $ git bisect bad <your bad ref>
+# $ git bisect good <your good ref>
+# $ git bisect run ~/chromiumos/src/scripts/llvm_local_bisection.sh
+#
+# This template exists as a "batteries included" LLVM bisection script,
+# which will modify the LLVM_NEXT hash to help the mage track down issues
+# locally.
+#
+# Modify the fixme sections below to customize to your bisection use-case.
+
+# FIXME: Replace this with the location of your llvm clone within the chroot.
+# We need this for the git history.
+LLVM_CLONE_PATH="${HOME}/chromiumos/src/third_party/llvm-project"
+
+main () {
+ # Note this builds with USE="llvm-next debug -thinlto -llvm_pgo_use continue-on-patch-failure"
+ build_llvm || exit
+
+ # FIXME: Write your actual bisection command here which uses
+ # LLVM_NEXT here.
+ #
+ # Example bisection command:
+ #
+ # build_pkg efitools || exit 1
+ #
+ # You can use build_pkg if you want to emerge a package and print
+ # out diagnostics along the way
+ #
+ # Fail Example: build_pkg "${MY_PACKAGE}" || exit 1
+ # Skip Example: build_pkg "${MY_PACKAGE}" || exit 125
+ #
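+  # A hedged sketch of one combined flow (the reproducer script below is
+  # hypothetical): skip revisions whose package cannot even build, and
+  # mark a revision bad only when the reproducer still fails:
+  #
+  #   build_pkg "${MY_PACKAGE}" || exit 125  # can't test this rev: skip
+  #   logdo ./run_repro_test.sh || exit 1    # repro still fails: bad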
+}
+
+# ---------------------------------------------------------------------
+
+# Current LLVM_NEXT_HASH we're using. Does not need to be set.
+CURRENT='UNKNOWN'
+
+logdo () {
+ local cmd="${1}"
+ shift
+  printf '%s $ %s' "$(date '+%T')" "${cmd}"
+ for i in "$@"; do
+    printf " '%s'" "${i}"
+ done
+ printf "\n"
+ "${cmd}" "$@"
+}
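+# For example, `logdo sudo emerge sys-devel/llvm` prints a line like
+# "14:05:32 $ sudo 'emerge' 'sys-devel/llvm'" before running the command.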
+
+log () {
+ echo "$(date '+%T') | $*"
+}
+
+build_llvm () {
+ cd "${LLVM_CLONE_PATH}" || exit 2 # Exit with error
+ local llvm_ebuild_path
+ llvm_ebuild_path="$(readlink -f "$(equery which llvm)")"
+ CURRENT="$(git rev-parse --short HEAD)"
+ log "Current hash=${CURRENT}"
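+  # Point the llvm ebuild's LLVM_NEXT_HASH at the revision `git bisect`
+  # checked out, so the emerge below builds exactly this hash.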
+ NEW_LINE="LLVM_NEXT_HASH=\"${CURRENT}\""
+ sed -i "s/^LLVM_NEXT_HASH=\".*\"/${NEW_LINE}/" "${llvm_ebuild_path}"
+
+ local logfile="/tmp/build-llvm.${CURRENT}.out"
+ log "Writing logs to ${logfile}"
+  log "sudo USE='llvm-next debug -thinlto -llvm_pgo_use continue-on-patch-failure'" \
+    " emerge sys-devel/llvm"
+  logdo sudo USE='llvm-next debug -thinlto -llvm_pgo_use continue-on-patch-failure' emerge \
+ sys-devel/llvm \
+ &> "${logfile}"
+ local emerge_exit_code="$?"
+ if [[ "${emerge_exit_code}" -ne 0 ]]; then
+ log "FAILED to build llvm with hash=${CURRENT}"
+ log 'Skipping this hash'
+ return 125 # 125 is the "skip" exit code.
+ fi
+  log "Successfully built LLVM with hash=${CURRENT}"
+ return 0 # Explicitly returning 0 for "good" even if a command errors out
+}
+
+build_pkg () {
+ local pkg="${1}"
+
+ local logfile="/tmp/build-${pkg}.${CURRENT}.out"
+ log "Writing logs to ${logfile}"
+ log "sudo emerge ${pkg}"
+ logdo sudo emerge "${pkg}" \
+ &> "${logfile}"
+ local emerge_exit_code="$?"
+ if [[ "${emerge_exit_code}" -ne 0 ]]; then
+ log "FAILED to build ${pkg} with hash=${CURRENT}"
+ return 1 # 1 here isn't for bisection, but for chaining with `||`
+ fi
+ log "Successfully built ${pkg} with hash=${CURRENT}"
+ return 0 # Explicitly returning 0 for "good" even if a command errors out
+}
+
+main
diff --git a/llvm_tools/llvm_patch_management.py b/llvm_tools/llvm_patch_management.py
deleted file mode 100755
index 90f9a5c0..00000000
--- a/llvm_tools/llvm_patch_management.py
+++ /dev/null
@@ -1,276 +0,0 @@
-#!/usr/bin/env python3
-# -*- coding: utf-8 -*-
-# Copyright 2019 The Chromium OS Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-#
-# pylint: disable=global-statement
-
-"""Creates the arguments for the patch manager for LLVM."""
-
-from __future__ import print_function
-
-import argparse
-import os
-
-from failure_modes import FailureModes
-import chroot
-import get_llvm_hash
-import patch_manager
-import subprocess_helpers
-
-# If set to `True`, then the contents of `stdout` after executing a command will
-# be displayed to the terminal.
-verbose = False
-
-
-def GetCommandLineArgs():
- """Parses the commandline for the optional commandline arguments.
-
- Returns:
- An argument parser object that contains all the commandline arguments.
- """
-
- # Default path to the chroot if a path is not specified.
- cros_root = os.path.expanduser('~')
- cros_root = os.path.join(cros_root, 'chromiumos')
-
- # Create parser and add optional command-line arguments.
- parser = argparse.ArgumentParser(description='Patch management for packages.')
-
- # Add argument for a specific chroot path.
- parser.add_argument(
- '--chroot_path',
- type=patch_manager.is_directory,
- default=cros_root,
- help='the absolute path to the chroot (default: %(default)s)')
-
- # Add argument for which packages to manage their patches.
- parser.add_argument(
- '--packages',
- required=False,
- nargs='+',
- default=['sys-devel/llvm'],
- help='the packages to manage their patches (default: %(default)s)')
-
- # Add argument for whether to display command contents to `stdout`.
- parser.add_argument(
- '--verbose',
- action='store_true',
- help='display contents of a command to the terminal '
- '(default: %(default)s)')
-
- # Add argument for the LLVM version to use for patch management.
- parser.add_argument(
- '--llvm_version',
- type=int,
- help='the LLVM version to use for patch management. Alternatively, you '
- 'can pass "google3" or "google3-unstable". (Default: "google3")')
-
- # Add argument for the mode of the patch management when handling patches.
- parser.add_argument(
- '--failure_mode',
- default=FailureModes.FAIL.value,
- choices=[FailureModes.FAIL.value, FailureModes.CONTINUE.value,
- FailureModes.DISABLE_PATCHES.value,
- FailureModes.REMOVE_PATCHES.value],
- help='the mode of the patch manager when handling failed patches ' \
- '(default: %(default)s)')
-
- # Add argument for the patch metadata file in $FILESDIR of LLVM.
- parser.add_argument(
- '--patch_metadata_file',
- default='PATCHES.json',
- help='the .json file in $FILESDIR that has all the patches and their '
- 'metadata if applicable (default: %(default)s)')
-
- # Parse the command line.
- args_output = parser.parse_args()
-
- global verbose
-
- verbose = args_output.verbose
-
- unique_packages = list(set(args_output.packages))
-
- # Duplicate packages were passed into the command line
- if len(unique_packages) != len(args_output.packages):
- raise ValueError('Duplicate packages were passed in: %s' % ' '.join(
- args_output.packages))
-
- args_output.packages = unique_packages
-
- return args_output
-
-
-def GetPathToFilesDirectory(chroot_path, package):
- """Gets the absolute path to $FILESDIR of the package.
-
- Args:
- chroot_path: The absolute path to the chroot.
- package: The package to find its absolute path to $FILESDIR.
-
- Returns:
- The absolute path to $FILESDIR.
-
- Raises:
- ValueError: An invalid chroot path has been provided.
- """
-
- if not os.path.isdir(chroot_path):
- raise ValueError('Invalid chroot provided: %s' % chroot_path)
-
- # Get the absolute chroot path to the ebuild.
- chroot_ebuild_path = subprocess_helpers.ChrootRunCommand(
- chroot_path, ['equery', 'w', package], verbose=verbose)
-
- # Get the absolute chroot path to $FILESDIR's parent directory.
- filesdir_parent_path = os.path.dirname(chroot_ebuild_path.strip())
-
- # Get the relative path to $FILESDIR's parent directory.
- rel_path = _GetRelativePathOfChrootPath(filesdir_parent_path)
-
- # Construct the absolute path to the package's 'files' directory.
- return os.path.join(chroot_path, rel_path, 'files/')
-
-
-def _GetRelativePathOfChrootPath(chroot_path):
- """Gets the relative path of the chroot path passed in.
-
- Args:
- chroot_path: The chroot path to get its relative path.
-
- Returns:
- The relative path after '/mnt/host/source/'.
-
- Raises:
- ValueError: The prefix of 'chroot_path' did not match '/mnt/host/source/'.
- """
-
- chroot_prefix = '/mnt/host/source/'
-
- if not chroot_path.startswith(chroot_prefix):
- raise ValueError('Invalid prefix for the chroot path: %s' % chroot_path)
-
- return chroot_path[len(chroot_prefix):]
-
-
-def _CheckPatchMetadataPath(patch_metadata_path):
- """Checks that the patch metadata path is valid.
-
- Args:
- patch_metadata_path: The absolute path to the .json file that has the
- patches and their metadata.
-
- Raises:
- ValueError: The file does not exist or the file does not end in '.json'.
- """
-
- if not os.path.isfile(patch_metadata_path):
- raise ValueError('Invalid file provided: %s' % patch_metadata_path)
-
- if not patch_metadata_path.endswith('.json'):
- raise ValueError('File does not end in ".json": %s' % patch_metadata_path)
-
-
-def _MoveSrcTreeHEADToGitHash(src_path, git_hash):
- """Moves HEAD to 'git_hash'."""
-
- move_head_cmd = ['git', '-C', src_path, 'checkout', git_hash]
-
- subprocess_helpers.ExecCommandAndCaptureOutput(move_head_cmd, verbose=verbose)
-
-
-def UpdatePackagesPatchMetadataFile(chroot_path, svn_version,
- patch_metadata_file, packages, mode):
- """Updates the packages metadata file.
-
- Args:
- chroot_path: The absolute path to the chroot.
- svn_version: The version to use for patch management.
- patch_metadata_file: The patch metadta file where all the patches and
- their metadata are.
- packages: All the packages to update their patch metadata file.
- mode: The mode for the patch manager to use when an applicable patch
- fails to apply.
- Ex: 'FailureModes.FAIL'
-
- Returns:
- A dictionary where the key is the package name and the value is a dictionary
- that has information on the patches.
- """
-
- # A dictionary where the key is the package name and the value is a dictionary
- # that has information on the patches.
- package_info = {}
-
- llvm_hash = get_llvm_hash.LLVMHash()
-
- with llvm_hash.CreateTempDirectory() as temp_dir:
- with get_llvm_hash.CreateTempLLVMRepo(temp_dir) as src_path:
- # Ensure that 'svn_version' exists in the chromiumum mirror of LLVM by
- # finding its corresponding git hash.
- git_hash = get_llvm_hash.GetGitHashFrom(src_path, svn_version)
-
- # Git hash of 'svn_version' exists, so move the source tree's HEAD to
- # 'git_hash' via `git checkout`.
- _MoveSrcTreeHEADToGitHash(src_path, git_hash)
-
- for cur_package in packages:
- # Get the absolute path to $FILESDIR of the package.
- filesdir_path = GetPathToFilesDirectory(chroot_path, cur_package)
-
- # Construct the absolute path to the patch metadata file where all the
- # patches and their metadata are.
- patch_metadata_path = os.path.join(filesdir_path, patch_metadata_file)
-
- # Make sure the patch metadata path is valid.
- _CheckPatchMetadataPath(patch_metadata_path)
-
- patch_manager.CleanSrcTree(src_path)
-
- # Get the patch results for the current package.
- patches_info = patch_manager.HandlePatches(
- svn_version, patch_metadata_path, filesdir_path, src_path, mode)
-
- package_info[cur_package] = patches_info._asdict()
-
- return package_info
-
-
-def main():
- """Updates the patch metadata file of each package if possible.
-
- Raises:
- AssertionError: The script was run inside the chroot.
- """
-
- chroot.VerifyOutsideChroot()
-
- args_output = GetCommandLineArgs()
-
- # Get the google3 LLVM version if a LLVM version was not provided.
- llvm_version = args_output.llvm_version
- if llvm_version in ('', 'google3', 'google3-unstable'):
- llvm_version = get_llvm_hash.GetGoogle3LLVMVersion(
- stable=llvm_version != 'google3-unstable')
-
- UpdatePackagesPatchMetadataFile(args_output.chroot_path, llvm_version,
- args_output.patch_metadata_file,
- args_output.packages,
- FailureModes(args_output.failure_mode))
-
- # Only 'disable_patches' and 'remove_patches' can potentially modify the patch
- # metadata file.
- if args_output.failure_mode == FailureModes.DISABLE_PATCHES.value or \
- args_output.failure_mode == FailureModes.REMOVE_PATCHES.value:
- print('The patch file %s has been modified for the packages:' %
- args_output.patch_metadata_file)
- print('\n'.join(args_output.packages))
- else:
- print('Applicable patches in %s applied successfully.' %
- args_output.patch_metadata_file)
-
-
-if __name__ == '__main__':
- main()
diff --git a/llvm_tools/llvm_patch_management_unittest.py b/llvm_tools/llvm_patch_management_unittest.py
deleted file mode 100755
index 968a816a..00000000
--- a/llvm_tools/llvm_patch_management_unittest.py
+++ /dev/null
@@ -1,307 +0,0 @@
-#!/usr/bin/env python3
-# -*- coding: utf-8 -*-
-# Copyright 2019 The Chromium OS Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-# pylint: disable=protected-access
-
-"""Unit tests when creating the arguments for the patch manager."""
-
-from __future__ import print_function
-from collections import namedtuple
-import os
-import unittest
-import unittest.mock as mock
-
-from failure_modes import FailureModes
-import get_llvm_hash
-import llvm_patch_management
-import patch_manager
-import subprocess_helpers
-
-
-class LlvmPatchManagementTest(unittest.TestCase):
- """Test class when constructing the arguments for the patch manager."""
-
- # Simulate the behavior of `os.path.isdir()` when the chroot path does not
- # exist or is not a directory.
- @mock.patch.object(os.path, 'isdir', return_value=False)
- def testInvalidChrootPathWhenGetPathToFilesDir(self, mock_isdir):
- chroot_path = '/some/path/to/chroot'
- package = 'sys-devel/llvm'
-
- # Verify the exception is raised when an invalid absolute path to the chroot
- # is passed in.
- with self.assertRaises(ValueError) as err:
- llvm_patch_management.GetPathToFilesDirectory(chroot_path, package)
-
- self.assertEqual(
- str(err.exception), 'Invalid chroot provided: %s' % chroot_path)
-
- mock_isdir.assert_called_once()
-
- # Simulate the behavior of 'os.path.isdir()' when a valid chroot path is
- # passed in.
- @mock.patch.object(os.path, 'isdir', return_value=True)
- @mock.patch.object(subprocess_helpers, 'ChrootRunCommand')
- @mock.patch.object(llvm_patch_management, '_GetRelativePathOfChrootPath')
- def testSuccessfullyGetPathToFilesDir(
- self, mock_get_relative_path_of_chroot_path, mock_chroot_cmd, mock_isdir):
-
- package_chroot_path = '/mnt/host/source/path/to/llvm/llvm.ebuild'
-
- # Simulate behavior of 'ChrootRunCommand()' when successfully
- # retrieved the absolute chroot path to the package's ebuild.
- mock_chroot_cmd.return_value = package_chroot_path
-
- # Simulate behavior of '_GetRelativePathOfChrootPath()' when successfully
- # removed '/mnt/host/source' of the absolute chroot path to the package's
- # ebuild.
- #
- # Returns relative path after '/mnt/host/source/'.
- mock_get_relative_path_of_chroot_path.return_value = 'path/to/llvm'
-
- chroot_path = '/some/path/to/chroot'
-
- package = 'sys-devel/llvm'
-
- self.assertEqual(
- llvm_patch_management.GetPathToFilesDirectory(chroot_path, package),
- '/some/path/to/chroot/path/to/llvm/files/')
-
- mock_isdir.assert_called_once()
-
- mock_chroot_cmd.assert_called_once()
-
- mock_get_relative_path_of_chroot_path.assert_called_once_with(
- '/mnt/host/source/path/to/llvm')
-
- def testInvalidPrefixForChrootPath(self):
- package_chroot_path = '/path/to/llvm'
-
- # Verify the exception is raised when the chroot path does not start with
- # '/mnt/host/source/'.
- with self.assertRaises(ValueError) as err:
- llvm_patch_management._GetRelativePathOfChrootPath(package_chroot_path)
-
- self.assertEqual(
- str(err.exception),
- 'Invalid prefix for the chroot path: %s' % package_chroot_path)
-
- def testValidPrefixForChrootPath(self):
- package_chroot_path = '/mnt/host/source/path/to/llvm'
-
- package_rel_path = 'path/to/llvm'
-
- self.assertEqual(
- llvm_patch_management._GetRelativePathOfChrootPath(package_chroot_path),
- package_rel_path)
-
- # Simulate behavior of 'os.path.isfile()' when the patch metadata file does
- # not exist.
- @mock.patch.object(os.path, 'isfile', return_value=False)
- def testInvalidFileForPatchMetadataPath(self, mock_isfile):
- abs_path_to_patch_file = '/abs/path/to/files/test.json'
-
- # Verify the exception is raised when the absolute path to the patch
- # metadata file does not exist.
- with self.assertRaises(ValueError) as err:
- llvm_patch_management._CheckPatchMetadataPath(abs_path_to_patch_file)
-
- self.assertEqual(
- str(err.exception),
- 'Invalid file provided: %s' % abs_path_to_patch_file)
-
- mock_isfile.assert_called_once()
-
- # Simulate behavior of 'os.path.isfile()' when the absolute path to the
- # patch metadata file exists.
- @mock.patch.object(os.path, 'isfile', return_value=True)
- def testPatchMetadataFileDoesNotEndInJson(self, mock_isfile):
- abs_path_to_patch_file = '/abs/path/to/files/PATCHES'
-
- # Verify the exception is raised when the patch metadata file does not end
- # in '.json'.
- with self.assertRaises(ValueError) as err:
- llvm_patch_management._CheckPatchMetadataPath(abs_path_to_patch_file)
-
- self.assertEqual(
- str(err.exception),
- 'File does not end in ".json": %s' % abs_path_to_patch_file)
-
- mock_isfile.assert_called_once()
-
- @mock.patch.object(os.path, 'isfile')
- def testValidPatchMetadataFile(self, mock_isfile):
- abs_path_to_patch_file = '/abs/path/to/files/PATCHES.json'
-
- # Simulate behavior of 'os.path.isfile()' when the absolute path to the
- # patch metadata file exists.
- mock_isfile.return_value = True
-
- llvm_patch_management._CheckPatchMetadataPath(abs_path_to_patch_file)
-
- mock_isfile.assert_called_once()
-
- # Simulate `GetGitHashFrom()` when successfully retrieved the git hash
- # of the version passed in.
- @mock.patch.object(
- get_llvm_hash, 'GetGitHashFrom', return_value='a123testhash1')
- # Simulate `CreateTempLLVMRepo()` when successfully created a work tree from
- # the LLVM repo copy in `llvm_tools` directory.
- @mock.patch.object(get_llvm_hash, 'CreateTempLLVMRepo')
- # Simulate behavior of `_MoveSrcTreeHEADToGitHash()` when successfully moved
- # the head pointer to the git hash of the revision.
- @mock.patch.object(llvm_patch_management, '_MoveSrcTreeHEADToGitHash')
- @mock.patch.object(llvm_patch_management, 'GetPathToFilesDirectory')
- @mock.patch.object(llvm_patch_management, '_CheckPatchMetadataPath')
- def testExceptionIsRaisedWhenUpdatingAPackagesMetadataFile(
- self, mock_check_patch_metadata_path, mock_get_filesdir_path,
- mock_move_head_pointer, mock_create_temp_llvm_repo, mock_get_git_hash):
-
- abs_path_to_patch_file = \
- '/some/path/to/chroot/some/path/to/filesdir/PATCHES'
-
- # Simulate the behavior of '_CheckPatchMetadataPath()' when the patch
- # metadata file in $FILESDIR does not exist or does not end in '.json'.
- def InvalidPatchMetadataFile(patch_metadata_path):
- self.assertEqual(patch_metadata_path, abs_path_to_patch_file)
-
- raise ValueError(
- 'File does not end in ".json": %s' % abs_path_to_patch_file)
-
- # Use the test function to simulate behavior of '_CheckPatchMetadataPath()'.
- mock_check_patch_metadata_path.side_effect = InvalidPatchMetadataFile
-
- abs_path_to_filesdir = '/some/path/to/chroot/some/path/to/filesdir'
-
- # Simulate the behavior of 'GetPathToFilesDirectory()' when successfully
- # constructed the absolute path to $FILESDIR of a package.
- mock_get_filesdir_path.return_value = abs_path_to_filesdir
-
- temp_work_tree = '/abs/path/to/tmpWorkTree'
-
- # Simulate the behavior of returning the absolute path to a worktree via
- # `git worktree add`.
- mock_create_temp_llvm_repo.return_value.__enter__.return_value.name = \
- temp_work_tree
-
- chroot_path = '/some/path/to/chroot'
- revision = 1000
- patch_file_name = 'PATCHES'
- package_name = 'test-package/package1'
-
- # Verify the exception is raised when a package is constructing the
- # arguments for the patch manager to update its patch metadata file and an
- # exception is raised in the process.
- with self.assertRaises(ValueError) as err:
- llvm_patch_management.UpdatePackagesPatchMetadataFile(
- chroot_path, revision, patch_file_name, [package_name],
- FailureModes.FAIL)
-
- self.assertEqual(
- str(err.exception),
- 'File does not end in ".json": %s' % abs_path_to_patch_file)
-
- mock_get_filesdir_path.assert_called_once_with(chroot_path, package_name)
-
- mock_get_git_hash.assert_called_once()
-
- mock_check_patch_metadata_path.assert_called_once()
-
- mock_move_head_pointer.assert_called_once()
-
- mock_create_temp_llvm_repo.assert_called_once()
-
- # Simulate `CleanSrcTree()` when successfully removed changes from the
- # worktree.
- @mock.patch.object(patch_manager, 'CleanSrcTree')
- # Simulate `GetGitHashFrom()` when successfully retrieved the git hash
- # of the version passed in.
- @mock.patch.object(
- get_llvm_hash, 'GetGitHashFrom', return_value='a123testhash1')
- # Simulate `CreateTempLLVMRepo()` when successfully created a work tree from
- # the LLVM repo copy in `llvm_tools` directory.
- @mock.patch.object(get_llvm_hash, 'CreateTempLLVMRepo')
- # Simulate behavior of `_MoveSrcTreeHEADToGitHash()` when successfully moved
- # the head pointer to the git hash of the revision.
- @mock.patch.object(llvm_patch_management, '_MoveSrcTreeHEADToGitHash')
- @mock.patch.object(llvm_patch_management, 'GetPathToFilesDirectory')
- @mock.patch.object(llvm_patch_management, '_CheckPatchMetadataPath')
- @mock.patch.object(patch_manager, 'HandlePatches')
- def testSuccessfullyRetrievedPatchResults(
- self, mock_handle_patches, mock_check_patch_metadata_path,
- mock_get_filesdir_path, mock_move_head_pointer,
- mock_create_temp_llvm_repo, mock_get_git_hash, mock_clean_src_tree):
-
- abs_path_to_filesdir = '/some/path/to/chroot/some/path/to/filesdir'
-
- abs_path_to_patch_file = \
- '/some/path/to/chroot/some/path/to/filesdir/PATCHES.json'
-
- # Simulate the behavior of 'GetPathToFilesDirectory()' when successfully
- # constructed the absolute path to $FILESDIR of a package.
- mock_get_filesdir_path.return_value = abs_path_to_filesdir
-
- PatchInfo = namedtuple('PatchInfo', [
- 'applied_patches', 'failed_patches', 'non_applicable_patches',
- 'disabled_patches', 'removed_patches', 'modified_metadata'
- ])
-
- # Simulate the behavior of 'HandlePatches()' when successfully iterated
- # through every patch in the patch metadata file and a dictionary is
- # returned that contains information about the patches' status.
- mock_handle_patches.return_value = PatchInfo(
- applied_patches=['fixes_something.patch'],
- failed_patches=['disables_output.patch'],
- non_applicable_patches=[],
- disabled_patches=[],
- removed_patches=[],
- modified_metadata=None)
-
- temp_work_tree = '/abs/path/to/tmpWorkTree'
-
- # Simulate the behavior of returning the absolute path to a worktree via
- # `git worktree add`.
- mock_create_temp_llvm_repo.return_value.__enter__.return_value.name = \
- temp_work_tree
-
- expected_patch_results = {
- 'applied_patches': ['fixes_something.patch'],
- 'failed_patches': ['disables_output.patch'],
- 'non_applicable_patches': [],
- 'disabled_patches': [],
- 'removed_patches': [],
- 'modified_metadata': None
- }
-
- chroot_path = '/some/path/to/chroot'
- revision = 1000
- patch_file_name = 'PATCHES.json'
- package_name = 'test-package/package2'
-
- patch_info = llvm_patch_management.UpdatePackagesPatchMetadataFile(
- chroot_path, revision, patch_file_name, [package_name],
- FailureModes.CONTINUE)
-
- self.assertDictEqual(patch_info, {package_name: expected_patch_results})
-
- mock_get_filesdir_path.assert_called_once_with(chroot_path, package_name)
-
- mock_check_patch_metadata_path.assert_called_once_with(
- abs_path_to_patch_file)
-
- mock_handle_patches.assert_called_once()
-
- mock_create_temp_llvm_repo.assert_called_once()
-
- mock_get_git_hash.assert_called_once()
-
- mock_move_head_pointer.assert_called_once()
-
- mock_clean_src_tree.assert_called_once()
-
-
-if __name__ == '__main__':
- unittest.main()
diff --git a/llvm_tools/llvm_project.py b/llvm_tools/llvm_project.py
index 7937729f..79a6cd2e 100644
--- a/llvm_tools/llvm_project.py
+++ b/llvm_tools/llvm_project.py
@@ -1,11 +1,10 @@
# -*- coding: utf-8 -*-
-# Copyright 2020 The Chromium OS Authors. All rights reserved.
+# Copyright 2020 The ChromiumOS Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Module for manipulating llvm-project-copy. Generally intended for tests."""
-from __future__ import print_function
import datetime
import os
@@ -17,48 +16,59 @@ import git_llvm_rev
def get_location() -> str:
- """Gets the absolute path for llvm-project-copy."""
- my_dir = os.path.dirname(os.path.abspath(__file__))
- return os.path.join(my_dir, 'llvm-project-copy')
+ """Gets the absolute path for llvm-project-copy."""
+ my_dir = os.path.dirname(os.path.abspath(__file__))
+ return os.path.join(my_dir, "llvm-project-copy")
def ensure_up_to_date():
- """Ensures that llvm-project-copy is checked out and semi-up-to-date."""
+ """Ensures that llvm-project-copy is checked out and semi-up-to-date."""
- checkout = get_location()
- if not os.path.isdir(checkout):
- print(
- 'No llvm-project exists locally; syncing it. This takes a while.',
- file=sys.stderr)
- actual_checkout = get_llvm_hash.GetAndUpdateLLVMProjectInLLVMTools()
- assert checkout == actual_checkout, '%s != %s' % (actual_checkout, checkout)
-
- commit_timestamp = subprocess.check_output(
- [
- 'git', 'log', '-n1', '--format=%ct',
- 'origin/' + git_llvm_rev.MAIN_BRANCH
- ],
- cwd=checkout,
- encoding='utf-8',
- )
+ checkout = get_location()
+ if not os.path.isdir(checkout):
+ print(
+ "No llvm-project exists locally; syncing it. This takes a while.",
+ file=sys.stderr,
+ )
+ actual_checkout = get_llvm_hash.GetAndUpdateLLVMProjectInLLVMTools()
+ assert checkout == actual_checkout, "%s != %s" % (
+ actual_checkout,
+ checkout,
+ )
- commit_time = datetime.datetime.fromtimestamp(int(commit_timestamp.strip()))
- now = datetime.datetime.now()
+ commit_timestamp = subprocess.check_output(
+ [
+ "git",
+ "log",
+ "-n1",
+ "--format=%ct",
+ "origin/" + git_llvm_rev.MAIN_BRANCH,
+ ],
+ cwd=checkout,
+ encoding="utf-8",
+ )
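+    # `--format=%ct` prints the committer date as seconds since the epoch;
+    # fromtimestamp() below converts it to a local datetime.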
- time_since_last_commit = now - commit_time
+ commit_time = datetime.datetime.fromtimestamp(int(commit_timestamp.strip()))
+ now = datetime.datetime.now()
- # Arbitrary, but if it's been more than 2d since we've seen a commit, it's
- # probably best to bring us up-to-date.
- if time_since_last_commit <= datetime.timedelta(days=2):
- return
+ time_since_last_commit = now - commit_time
- print(
- '%d days have elapsed since the last commit to %s; auto-syncing' %
- (time_since_last_commit.days, checkout),
- file=sys.stderr)
+ # Arbitrary, but if it's been more than 2d since we've seen a commit, it's
+ # probably best to bring us up-to-date.
+ if time_since_last_commit <= datetime.timedelta(days=2):
+ return
- result = subprocess.run(['git', 'fetch', 'origin'], check=False, cwd=checkout)
- if result.returncode:
print(
- 'Sync failed somehow; hoping that things are fresh enough, then...',
- file=sys.stderr)
+ "%d days have elapsed since the last commit to %s; auto-syncing"
+ % (time_since_last_commit.days, checkout),
+ file=sys.stderr,
+ )
+
+ result = subprocess.run(
+ ["git", "fetch", "origin"], check=False, cwd=checkout
+ )
+ if result.returncode:
+ print(
+ "Sync failed somehow; hoping that things are fresh enough, then...",
+ file=sys.stderr,
+ )
diff --git a/llvm_tools/modify_a_tryjob.py b/llvm_tools/modify_a_tryjob.py
index 519fb51e..03de606d 100755
--- a/llvm_tools/modify_a_tryjob.py
+++ b/llvm_tools/modify_a_tryjob.py
@@ -1,12 +1,11 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
-# Copyright 2019 The Chromium OS Authors. All rights reserved.
+# Copyright 2019 The ChromiumOS Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Modifies a tryjob based off of arguments."""
-from __future__ import print_function
import argparse
import enum
@@ -23,274 +22,360 @@ import update_tryjob_status
class ModifyTryjob(enum.Enum):
- """Options to modify a tryjob."""
+ """Options to modify a tryjob."""
- REMOVE = 'remove'
- RELAUNCH = 'relaunch'
- ADD = 'add'
+ REMOVE = "remove"
+ RELAUNCH = "relaunch"
+ ADD = "add"
def GetCommandLineArgs():
- """Parses the command line for the command line arguments."""
-
- # Default path to the chroot if a path is not specified.
- cros_root = os.path.expanduser('~')
- cros_root = os.path.join(cros_root, 'chromiumos')
-
- # Create parser and add optional command-line arguments.
- parser = argparse.ArgumentParser(
- description='Removes, relaunches, or adds a tryjob.')
-
- # Add argument for the JSON file to use for the update of a tryjob.
- parser.add_argument(
- '--status_file',
- required=True,
- help='The absolute path to the JSON file that contains the tryjobs used '
- 'for bisecting LLVM.')
-
- # Add argument that determines what action to take on the revision specified.
- parser.add_argument(
- '--modify_tryjob',
- required=True,
- choices=[modify_tryjob.value for modify_tryjob in ModifyTryjob],
- help='What action to perform on the tryjob.')
-
- # Add argument that determines which revision to search for in the list of
- # tryjobs.
- parser.add_argument('--revision',
- required=True,
- type=int,
- help='The revision to either remove or relaunch.')
-
- # Add argument for other change lists that want to run alongside the tryjob.
- parser.add_argument(
- '--extra_change_lists',
- type=int,
- nargs='+',
- help='change lists that would like to be run alongside the change list '
- 'of updating the packages')
-
- # Add argument for custom options for the tryjob.
- parser.add_argument('--options',
- required=False,
- nargs='+',
- help='options to use for the tryjob testing')
-
- # Add argument for the builder to use for the tryjob.
- parser.add_argument('--builder',
- help='builder to use for the tryjob testing')
-
- # Add argument for a specific chroot path.
- parser.add_argument('--chroot_path',
- default=cros_root,
- help='the path to the chroot (default: %(default)s)')
-
- # Add argument for whether to display command contents to `stdout`.
- parser.add_argument('--verbose',
- action='store_true',
- help='display contents of a command to the terminal '
- '(default: %(default)s)')
-
- args_output = parser.parse_args()
-
- if (not os.path.isfile(args_output.status_file)
- or not args_output.status_file.endswith('.json')):
- raise ValueError('File does not exist or does not ending in ".json" '
- ': %s' % args_output.status_file)
-
- if (args_output.modify_tryjob == ModifyTryjob.ADD.value
- and not args_output.builder):
- raise ValueError('A builder is required for adding a tryjob.')
- elif (args_output.modify_tryjob != ModifyTryjob.ADD.value
- and args_output.builder):
- raise ValueError('Specifying a builder is only available when adding a '
- 'tryjob.')
-
- return args_output
-
-
-def GetCLAfterUpdatingPackages(packages, git_hash, svn_version, chroot_path,
- patch_metadata_file, svn_option):
- """Updates the packages' LLVM_NEXT."""
-
- change_list = update_chromeos_llvm_hash.UpdatePackages(
- packages,
- update_chromeos_llvm_hash.LLVMVariant.next,
- git_hash,
- svn_version,
- chroot_path,
- patch_metadata_file,
- failure_modes.FailureModes.DISABLE_PATCHES,
- svn_option,
- extra_commit_msg=None)
-
- print('\nSuccessfully updated packages to %d' % svn_version)
- print('Gerrit URL: %s' % change_list.url)
- print('Change list number: %d' % change_list.cl_number)
-
- return change_list
-
-
-def CreateNewTryjobEntryForBisection(cl, extra_cls, options, builder,
- chroot_path, cl_url, revision):
- """Submits a tryjob and adds additional information."""
-
- # Get the tryjob results after submitting the tryjob.
- # Format of 'tryjob_results':
- # [
- # {
- # 'link' : [TRYJOB_LINK],
- # 'buildbucket_id' : [BUILDBUCKET_ID],
- # 'extra_cls' : [EXTRA_CLS_LIST],
- # 'options' : [EXTRA_OPTIONS_LIST],
- # 'builder' : [BUILDER_AS_A_LIST]
- # }
- # ]
- tryjob_results = update_packages_and_run_tests.RunTryJobs(
- cl, extra_cls, options, [builder], chroot_path)
- print('\nTryjob:')
- print(tryjob_results[0])
-
- # Add necessary information about the tryjob.
- tryjob_results[0]['url'] = cl_url
- tryjob_results[0]['rev'] = revision
- tryjob_results[0]['status'] = update_tryjob_status.TryjobStatus.PENDING.value
- tryjob_results[0]['cl'] = cl
-
- return tryjob_results[0]
-
-
-def AddTryjob(packages, git_hash, revision, chroot_path, patch_metadata_file,
- extra_cls, options, builder, verbose, svn_option):
- """Submits a tryjob."""
-
- update_chromeos_llvm_hash.verbose = verbose
-
- change_list = GetCLAfterUpdatingPackages(packages, git_hash, revision,
- chroot_path, patch_metadata_file,
- svn_option)
-
- tryjob_dict = CreateNewTryjobEntryForBisection(change_list.cl_number,
- extra_cls, options, builder,
- chroot_path, change_list.url,
- revision)
-
- return tryjob_dict
-
-
-def PerformTryjobModification(revision, modify_tryjob, status_file, extra_cls,
- options, builder, chroot_path, verbose):
- """Removes, relaunches, or adds a tryjob.
-
- Args:
- revision: The revision associated with the tryjob.
- modify_tryjob: What action to take on the tryjob.
- Ex: ModifyTryjob.REMOVE, ModifyTryjob.RELAUNCH, ModifyTryjob.ADD
- status_file: The .JSON file that contains the tryjobs.
- extra_cls: Extra change lists to be run alongside tryjob
- options: Extra options to pass into 'cros tryjob'.
- builder: The builder to use for 'cros tryjob'.
- chroot_path: The absolute path to the chroot (used by 'cros tryjob' when
- relaunching a tryjob).
- verbose: Determines whether to print the contents of a command to `stdout`.
- """
-
- # Format of 'bisect_contents':
- # {
- # 'start': [START_REVISION_OF_BISECTION]
- # 'end': [END_REVISION_OF_BISECTION]
- # 'jobs' : [
- # {[TRYJOB_INFORMATION]},
- # {[TRYJOB_INFORMATION]},
- # ...,
- # {[TRYJOB_INFORMATION]}
- # ]
- # }
- with open(status_file) as tryjobs:
- bisect_contents = json.load(tryjobs)
-
- if not bisect_contents['jobs'] and modify_tryjob != ModifyTryjob.ADD:
- sys.exit('No tryjobs in %s' % status_file)
-
- tryjob_index = update_tryjob_status.FindTryjobIndex(revision,
- bisect_contents['jobs'])
-
- # 'FindTryjobIndex()' returns None if the tryjob was not found.
- if tryjob_index is None and modify_tryjob != ModifyTryjob.ADD:
- raise ValueError('Unable to find tryjob for %d in %s' %
- (revision, status_file))
-
- # Determine the action to take based off of 'modify_tryjob'.
- if modify_tryjob == ModifyTryjob.REMOVE:
- del bisect_contents['jobs'][tryjob_index]
-
- print('Successfully deleted the tryjob of revision %d' % revision)
- elif modify_tryjob == ModifyTryjob.RELAUNCH:
- # Need to update the tryjob link and buildbucket ID.
+ """Parses the command line for the command line arguments."""
+
+ # Default path to the chroot if a path is not specified.
+ cros_root = os.path.expanduser("~")
+ cros_root = os.path.join(cros_root, "chromiumos")
+
+ # Create parser and add optional command-line arguments.
+ parser = argparse.ArgumentParser(
+ description="Removes, relaunches, or adds a tryjob."
+ )
+
+ # Add argument for the JSON file to use for the update of a tryjob.
+ parser.add_argument(
+ "--status_file",
+ required=True,
+ help="The absolute path to the JSON file that contains the tryjobs used "
+ "for bisecting LLVM.",
+ )
+
+ # Add argument that determines what action to take on the revision specified.
+ parser.add_argument(
+ "--modify_tryjob",
+ required=True,
+ choices=[modify_tryjob.value for modify_tryjob in ModifyTryjob],
+ help="What action to perform on the tryjob.",
+ )
+
+ # Add argument that determines which revision to search for in the list of
+ # tryjobs.
+ parser.add_argument(
+ "--revision",
+ required=True,
+ type=int,
+ help="The revision to either remove or relaunch.",
+ )
+
+ # Add argument for other change lists that want to run alongside the tryjob.
+ parser.add_argument(
+ "--extra_change_lists",
+ type=int,
+ nargs="+",
+ help="change lists that would like to be run alongside the change list "
+ "of updating the packages",
+ )
+
+ # Add argument for custom options for the tryjob.
+ parser.add_argument(
+ "--options",
+ required=False,
+ nargs="+",
+ help="options to use for the tryjob testing",
+ )
+
+ # Add argument for the builder to use for the tryjob.
+ parser.add_argument(
+ "--builder", help="builder to use for the tryjob testing"
+ )
+
+ # Add argument for a specific chroot path.
+ parser.add_argument(
+ "--chroot_path",
+ default=cros_root,
+ help="the path to the chroot (default: %(default)s)",
+ )
+
+ # Add argument for whether to display command contents to `stdout`.
+ parser.add_argument(
+ "--verbose",
+ action="store_true",
+ help="display contents of a command to the terminal "
+ "(default: %(default)s)",
+ )
+
+ args_output = parser.parse_args()
+
+ if not os.path.isfile(
+ args_output.status_file
+ ) or not args_output.status_file.endswith(".json"):
+ raise ValueError(
+            'File does not exist or does not end in ".json": %s'
+            % args_output.status_file
+ )
+
+ if (
+ args_output.modify_tryjob == ModifyTryjob.ADD.value
+ and not args_output.builder
+ ):
+ raise ValueError("A builder is required for adding a tryjob.")
+ elif (
+ args_output.modify_tryjob != ModifyTryjob.ADD.value
+ and args_output.builder
+ ):
+ raise ValueError(
+ "Specifying a builder is only available when adding a " "tryjob."
+ )
+
+ return args_output
+
+
+def GetCLAfterUpdatingPackages(
+ packages,
+ git_hash,
+ svn_version,
+ chroot_path,
+ patch_metadata_file,
+ svn_option,
+):
+ """Updates the packages' LLVM_NEXT."""
+
+ change_list = update_chromeos_llvm_hash.UpdatePackages(
+ packages=packages,
+ manifest_packages=[],
+ llvm_variant=update_chromeos_llvm_hash.LLVMVariant.next,
+ git_hash=git_hash,
+ svn_version=svn_version,
+ chroot_path=chroot_path,
+ mode=failure_modes.FailureModes.DISABLE_PATCHES,
+ git_hash_source=svn_option,
+ extra_commit_msg=None,
+ )
+
+ print("\nSuccessfully updated packages to %d" % svn_version)
+ print("Gerrit URL: %s" % change_list.url)
+ print("Change list number: %d" % change_list.cl_number)
+
+ return change_list
+
+
+def CreateNewTryjobEntryForBisection(
+ cl, extra_cls, options, builder, chroot_path, cl_url, revision
+):
+ """Submits a tryjob and adds additional information."""
+
+ # Get the tryjob results after submitting the tryjob.
+ # Format of 'tryjob_results':
+ # [
+ # {
+ # 'link' : [TRYJOB_LINK],
+ # 'buildbucket_id' : [BUILDBUCKET_ID],
+ # 'extra_cls' : [EXTRA_CLS_LIST],
+ # 'options' : [EXTRA_OPTIONS_LIST],
+ # 'builder' : [BUILDER_AS_A_LIST]
+ # }
+ # ]
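+    # For instance, a single submitted tryjob might come back as
+    # (illustrative values):
+    # [{'link': 'https://some_tryjob_link.com', 'buildbucket_id': 1200,
+    #   'extra_cls': None, 'options': None, 'builder': ['some-builder']}]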
tryjob_results = update_packages_and_run_tests.RunTryJobs(
- bisect_contents['jobs'][tryjob_index]['cl'],
- bisect_contents['jobs'][tryjob_index]['extra_cls'],
- bisect_contents['jobs'][tryjob_index]['options'],
- bisect_contents['jobs'][tryjob_index]['builder'], chroot_path)
-
- bisect_contents['jobs'][tryjob_index][
- 'status'] = update_tryjob_status.TryjobStatus.PENDING.value
- bisect_contents['jobs'][tryjob_index]['link'] = tryjob_results[0]['link']
- bisect_contents['jobs'][tryjob_index]['buildbucket_id'] = tryjob_results[
- 0]['buildbucket_id']
-
- print('Successfully relaunched the tryjob for revision %d and updated '
- 'the tryjob link to %s' % (revision, tryjob_results[0]['link']))
- elif modify_tryjob == ModifyTryjob.ADD:
- # Tryjob exists already.
- if tryjob_index is not None:
- raise ValueError('Tryjob already exists (index is %d) in %s.' %
- (tryjob_index, status_file))
-
- # Make sure the revision is within the bounds of the start and end of the
- # bisection.
- elif bisect_contents['start'] < revision < bisect_contents['end']:
-
- patch_metadata_file = 'PATCHES.json'
-
- git_hash, revision = get_llvm_hash.GetLLVMHashAndVersionFromSVNOption(
- revision)
-
- tryjob_dict = AddTryjob(update_chromeos_llvm_hash.DEFAULT_PACKAGES,
- git_hash, revision, chroot_path,
- patch_metadata_file, extra_cls, options, builder,
- verbose, revision)
-
- bisect_contents['jobs'].append(tryjob_dict)
-
- print('Successfully added tryjob of revision %d' % revision)
+ cl, extra_cls, options, [builder], chroot_path
+ )
+ print("\nTryjob:")
+ print(tryjob_results[0])
+
+ # Add necessary information about the tryjob.
+ tryjob_results[0]["url"] = cl_url
+ tryjob_results[0]["rev"] = revision
+ tryjob_results[0][
+ "status"
+ ] = update_tryjob_status.TryjobStatus.PENDING.value
+ tryjob_results[0]["cl"] = cl
+
+ return tryjob_results[0]
+
+
+def AddTryjob(
+ packages,
+ git_hash,
+ revision,
+ chroot_path,
+ patch_metadata_file,
+ extra_cls,
+ options,
+ builder,
+ verbose,
+ svn_option,
+):
+ """Submits a tryjob."""
+
+ update_chromeos_llvm_hash.verbose = verbose
+
+ change_list = GetCLAfterUpdatingPackages(
+ packages,
+ git_hash,
+ revision,
+ chroot_path,
+ patch_metadata_file,
+ svn_option,
+ )
+
+ tryjob_dict = CreateNewTryjobEntryForBisection(
+ change_list.cl_number,
+ extra_cls,
+ options,
+ builder,
+ chroot_path,
+ change_list.url,
+ revision,
+ )
+
+ return tryjob_dict
+
+
+def PerformTryjobModification(
+ revision,
+ modify_tryjob,
+ status_file,
+ extra_cls,
+ options,
+ builder,
+ chroot_path,
+ verbose,
+):
+ """Removes, relaunches, or adds a tryjob.
+
+ Args:
+ revision: The revision associated with the tryjob.
+ modify_tryjob: What action to take on the tryjob.
+ Ex: ModifyTryjob.REMOVE, ModifyTryjob.RELAUNCH, ModifyTryjob.ADD
+ status_file: The .JSON file that contains the tryjobs.
+ extra_cls: Extra change lists to be run alongside tryjob
+ options: Extra options to pass into 'cros tryjob'.
+ builder: The builder to use for 'cros tryjob'.
+ chroot_path: The absolute path to the chroot (used by 'cros tryjob' when
+ relaunching a tryjob).
+ verbose: Determines whether to print the contents of a command to `stdout`.
+ """
+
+ # Format of 'bisect_contents':
+ # {
+ # 'start': [START_REVISION_OF_BISECTION]
+ # 'end': [END_REVISION_OF_BISECTION]
+ # 'jobs' : [
+ # {[TRYJOB_INFORMATION]},
+ # {[TRYJOB_INFORMATION]},
+ # ...,
+ # {[TRYJOB_INFORMATION]}
+ # ]
+ # }
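+    # e.g. (illustrative values):
+    # {
+    #     'start': 369410,
+    #     'end': 369420,
+    #     'jobs': [{'rev': 369411, 'status': 'pending',
+    #               'buildbucket_id': 1200}]
+    # }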
+ with open(status_file) as tryjobs:
+ bisect_contents = json.load(tryjobs)
+
+ if not bisect_contents["jobs"] and modify_tryjob != ModifyTryjob.ADD:
+ sys.exit("No tryjobs in %s" % status_file)
+
+ tryjob_index = update_tryjob_status.FindTryjobIndex(
+ revision, bisect_contents["jobs"]
+ )
+
+ # 'FindTryjobIndex()' returns None if the tryjob was not found.
+ if tryjob_index is None and modify_tryjob != ModifyTryjob.ADD:
+ raise ValueError(
+ "Unable to find tryjob for %d in %s" % (revision, status_file)
+ )
+
+ # Determine the action to take based off of 'modify_tryjob'.
+ if modify_tryjob == ModifyTryjob.REMOVE:
+ del bisect_contents["jobs"][tryjob_index]
+
+ print("Successfully deleted the tryjob of revision %d" % revision)
+ elif modify_tryjob == ModifyTryjob.RELAUNCH:
+ # Need to update the tryjob link and buildbucket ID.
+ tryjob_results = update_packages_and_run_tests.RunTryJobs(
+ bisect_contents["jobs"][tryjob_index]["cl"],
+ bisect_contents["jobs"][tryjob_index]["extra_cls"],
+ bisect_contents["jobs"][tryjob_index]["options"],
+ bisect_contents["jobs"][tryjob_index]["builder"],
+ chroot_path,
+ )
+
+ bisect_contents["jobs"][tryjob_index][
+ "status"
+ ] = update_tryjob_status.TryjobStatus.PENDING.value
+ bisect_contents["jobs"][tryjob_index]["link"] = tryjob_results[0][
+ "link"
+ ]
+ bisect_contents["jobs"][tryjob_index][
+ "buildbucket_id"
+ ] = tryjob_results[0]["buildbucket_id"]
+
+ print(
+ "Successfully relaunched the tryjob for revision %d and updated "
+ "the tryjob link to %s" % (revision, tryjob_results[0]["link"])
+ )
+ elif modify_tryjob == ModifyTryjob.ADD:
+ # Tryjob exists already.
+ if tryjob_index is not None:
+ raise ValueError(
+ "Tryjob already exists (index is %d) in %s."
+ % (tryjob_index, status_file)
+ )
+
+ # Make sure the revision is within the bounds of the start and end of the
+ # bisection.
+ elif bisect_contents["start"] < revision < bisect_contents["end"]:
+
+ patch_metadata_file = "PATCHES.json"
+
+ (
+ git_hash,
+ revision,
+ ) = get_llvm_hash.GetLLVMHashAndVersionFromSVNOption(revision)
+
+ tryjob_dict = AddTryjob(
+ update_chromeos_llvm_hash.DEFAULT_PACKAGES,
+ git_hash,
+ revision,
+ chroot_path,
+ patch_metadata_file,
+ extra_cls,
+ options,
+ builder,
+ verbose,
+ revision,
+ )
+
+ bisect_contents["jobs"].append(tryjob_dict)
+
+ print("Successfully added tryjob of revision %d" % revision)
+ else:
+ raise ValueError("Failed to add tryjob to %s" % status_file)
else:
- raise ValueError('Failed to add tryjob to %s' % status_file)
- else:
- raise ValueError('Invalid "modify_tryjob" option provided: %s' %
- modify_tryjob)
+ raise ValueError(
+ 'Invalid "modify_tryjob" option provided: %s' % modify_tryjob
+ )
- with open(status_file, 'w') as update_tryjobs:
- json.dump(bisect_contents,
- update_tryjobs,
- indent=4,
- separators=(',', ': '))
+ with open(status_file, "w") as update_tryjobs:
+ json.dump(
+ bisect_contents, update_tryjobs, indent=4, separators=(",", ": ")
+ )
def main():
- """Removes, relaunches, or adds a tryjob."""
+ """Removes, relaunches, or adds a tryjob."""
- chroot.VerifyOutsideChroot()
+ chroot.VerifyOutsideChroot()
- args_output = GetCommandLineArgs()
+ args_output = GetCommandLineArgs()
- PerformTryjobModification(args_output.revision,
- ModifyTryjob(args_output.modify_tryjob),
- args_output.status_file,
- args_output.extra_change_lists,
- args_output.options, args_output.builder,
- args_output.chroot_path, args_output.verbose)
+ PerformTryjobModification(
+ args_output.revision,
+ ModifyTryjob(args_output.modify_tryjob),
+ args_output.status_file,
+ args_output.extra_change_lists,
+ args_output.options,
+ args_output.builder,
+ args_output.chroot_path,
+ args_output.verbose,
+ )
-if __name__ == '__main__':
- main()
+if __name__ == "__main__":
+ main()
diff --git a/llvm_tools/modify_a_tryjob_unittest.py b/llvm_tools/modify_a_tryjob_unittest.py
index e3c62972..712e2614 100755
--- a/llvm_tools/modify_a_tryjob_unittest.py
+++ b/llvm_tools/modify_a_tryjob_unittest.py
@@ -1,12 +1,11 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
-# Copyright 2019 The Chromium OS Authors. All rights reserved.
+# Copyright 2019 The ChromiumOS Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Tests for modifying a tryjob."""
-from __future__ import print_function
import json
import unittest
@@ -20,383 +19,435 @@ import update_tryjob_status
class ModifyATryjobTest(unittest.TestCase):
- """Unittests for modifying a tryjob."""
-
- def testNoTryjobsInStatusFile(self):
- bisect_test_contents = {'start': 369410, 'end': 369420, 'jobs': []}
-
- # Create a temporary .JSON file to simulate a .JSON file that has bisection
- # contents.
- with test_helpers.CreateTemporaryJsonFile() as temp_json_file:
- with open(temp_json_file, 'w') as f:
- test_helpers.WritePrettyJsonFile(bisect_test_contents, f)
-
- revision_to_modify = 369411
-
- args_output = test_helpers.ArgsOutputTest()
- args_output.builders = None
- args_output.options = None
-
- # Verify the exception is raised there are no tryjobs in the status file
- # and the mode is not to 'add' a tryjob.
- with self.assertRaises(SystemExit) as err:
- modify_a_tryjob.PerformTryjobModification(
- revision_to_modify, modify_a_tryjob.ModifyTryjob.REMOVE,
- temp_json_file, args_output.extra_change_lists, args_output.options,
- args_output.builders, args_output.chroot_path, args_output.verbose)
-
- self.assertEqual(str(err.exception), 'No tryjobs in %s' % temp_json_file)
-
- # Simulate the behavior of `FindTryjobIndex()` when the index of the tryjob
- # was not found.
- @mock.patch.object(update_tryjob_status, 'FindTryjobIndex', return_value=None)
- def testNoTryjobIndexFound(self, mock_find_tryjob_index):
- bisect_test_contents = {
- 'start': 369410,
- 'end': 369420,
- 'jobs': [{
- 'rev': 369411,
- 'status': 'pending',
- 'buildbucket_id': 1200
- }]
- }
-
- # Create a temporary .JSON file to simulate a .JSON file that has bisection
- # contents.
- with test_helpers.CreateTemporaryJsonFile() as temp_json_file:
- with open(temp_json_file, 'w') as f:
- test_helpers.WritePrettyJsonFile(bisect_test_contents, f)
-
- revision_to_modify = 369412
-
- args_output = test_helpers.ArgsOutputTest()
- args_output.builders = None
- args_output.options = None
-
- # Verify the exception is raised when the index of the tryjob was not
- # found in the status file and the mode is not to 'add' a tryjob.
- with self.assertRaises(ValueError) as err:
- modify_a_tryjob.PerformTryjobModification(
- revision_to_modify, modify_a_tryjob.ModifyTryjob.REMOVE,
- temp_json_file, args_output.extra_change_lists, args_output.options,
- args_output.builders, args_output.chroot_path, args_output.verbose)
-
- self.assertEqual(
- str(err.exception), 'Unable to find tryjob for %d in %s' %
- (revision_to_modify, temp_json_file))
-
- mock_find_tryjob_index.assert_called_once()
-
- # Simulate the behavior of `FindTryjobIndex()` when the index of the tryjob
- # was found.
- @mock.patch.object(update_tryjob_status, 'FindTryjobIndex', return_value=0)
- def testSuccessfullyRemovedTryjobInStatusFile(self, mock_find_tryjob_index):
- bisect_test_contents = {
- 'start': 369410,
- 'end': 369420,
- 'jobs': [{
- 'rev': 369414,
- 'status': 'pending',
- 'buildbucket_id': 1200
- }]
- }
-
- # Create a temporary .JSON file to simulate a .JSON file that has bisection
- # contents.
- with test_helpers.CreateTemporaryJsonFile() as temp_json_file:
- with open(temp_json_file, 'w') as f:
- test_helpers.WritePrettyJsonFile(bisect_test_contents, f)
-
- revision_to_modify = 369414
-
- args_output = test_helpers.ArgsOutputTest()
- args_output.builders = None
- args_output.options = None
-
- modify_a_tryjob.PerformTryjobModification(
- revision_to_modify, modify_a_tryjob.ModifyTryjob.REMOVE,
- temp_json_file, args_output.extra_change_lists, args_output.options,
- args_output.builders, args_output.chroot_path, args_output.verbose)
-
- # Verify that the tryjob was removed from the status file.
- with open(temp_json_file) as status_file:
- bisect_contents = json.load(status_file)
-
- expected_file_contents = {'start': 369410, 'end': 369420, 'jobs': []}
-
- self.assertDictEqual(bisect_contents, expected_file_contents)
-
- mock_find_tryjob_index.assert_called_once()
-
- # Simulate the behavior of `RunTryJobs()` when successfully submitted a
- # tryjob.
- @mock.patch.object(update_packages_and_run_tests, 'RunTryJobs')
- # Simulate the behavior of `FindTryjobIndex()` when the index of the tryjob
- # was found.
- @mock.patch.object(update_tryjob_status, 'FindTryjobIndex', return_value=0)
- def testSuccessfullyRelaunchedTryjob(self, mock_find_tryjob_index,
- mock_run_tryjob):
-
- bisect_test_contents = {
- 'start':
- 369410,
- 'end':
- 369420,
- 'jobs': [{
- 'rev': 369411,
- 'status': 'bad',
- 'link': 'https://some_tryjob_link.com',
- 'buildbucket_id': 1200,
- 'cl': 123,
- 'extra_cls': None,
- 'options': None,
- 'builder': ['some-builder-tryjob']
- }]
- }
-
- tryjob_result = [{
- 'link': 'https://some_new_tryjob_link.com',
- 'buildbucket_id': 20
- }]
-
- mock_run_tryjob.return_value = tryjob_result
-
- # Create a temporary .JSON file to simulate a .JSON file that has bisection
- # contents.
- with test_helpers.CreateTemporaryJsonFile() as temp_json_file:
- with open(temp_json_file, 'w') as f:
- test_helpers.WritePrettyJsonFile(bisect_test_contents, f)
-
- revision_to_modify = 369411
-
- args_output = test_helpers.ArgsOutputTest()
- args_output.builders = None
- args_output.options = None
-
- modify_a_tryjob.PerformTryjobModification(
- revision_to_modify, modify_a_tryjob.ModifyTryjob.RELAUNCH,
- temp_json_file, args_output.extra_change_lists, args_output.options,
- args_output.builders, args_output.chroot_path, args_output.verbose)
-
- # Verify that the tryjob's information was updated after submtting the
- # tryjob.
- with open(temp_json_file) as status_file:
- bisect_contents = json.load(status_file)
-
- expected_file_contents = {
- 'start':
- 369410,
- 'end':
- 369420,
- 'jobs': [{
- 'rev': 369411,
- 'status': 'pending',
- 'link': 'https://some_new_tryjob_link.com',
- 'buildbucket_id': 20,
- 'cl': 123,
- 'extra_cls': None,
- 'options': None,
- 'builder': ['some-builder-tryjob']
- }]
+ """Unittests for modifying a tryjob."""
+
+ def testNoTryjobsInStatusFile(self):
+ bisect_test_contents = {"start": 369410, "end": 369420, "jobs": []}
+
+        # Create a temporary JSON file to simulate a status file with
+        # bisection contents.
+ with test_helpers.CreateTemporaryJsonFile() as temp_json_file:
+ with open(temp_json_file, "w") as f:
+ test_helpers.WritePrettyJsonFile(bisect_test_contents, f)
+
+ revision_to_modify = 369411
+
+ args_output = test_helpers.ArgsOutputTest()
+ args_output.builders = None
+ args_output.options = None
+
+        # Verify the exception is raised when there are no tryjobs in the
+        # status file and the mode is not to 'add' a tryjob.
+ with self.assertRaises(SystemExit) as err:
+ modify_a_tryjob.PerformTryjobModification(
+ revision_to_modify,
+ modify_a_tryjob.ModifyTryjob.REMOVE,
+ temp_json_file,
+ args_output.extra_change_lists,
+ args_output.options,
+ args_output.builders,
+ args_output.chroot_path,
+ args_output.verbose,
+ )
+
+ self.assertEqual(
+ str(err.exception), "No tryjobs in %s" % temp_json_file
+ )
+
+ # Simulate the behavior of `FindTryjobIndex()` when the index of the tryjob
+ # was not found.
+ @mock.patch.object(
+ update_tryjob_status, "FindTryjobIndex", return_value=None
+ )
+ def testNoTryjobIndexFound(self, mock_find_tryjob_index):
+ bisect_test_contents = {
+ "start": 369410,
+ "end": 369420,
+ "jobs": [
+ {"rev": 369411, "status": "pending", "buildbucket_id": 1200}
+ ],
}
- self.assertDictEqual(bisect_contents, expected_file_contents)
-
- mock_find_tryjob_index.assert_called_once()
-
- mock_run_tryjob.assert_called_once()
-
- # Simulate the behavior of `FindTryjobIndex()` when the index of the tryjob
- # was found.
- @mock.patch.object(update_tryjob_status, 'FindTryjobIndex', return_value=0)
- def testAddingTryjobThatAlreadyExists(self, mock_find_tryjob_index):
- bisect_test_contents = {
- 'start': 369410,
- 'end': 369420,
- 'jobs': [{
- 'rev': 369411,
- 'status': 'bad',
- 'builder': ['some-builder']
- }]
- }
-
- # Create a temporary .JSON file to simulate a .JSON file that has bisection
- # contents.
- with test_helpers.CreateTemporaryJsonFile() as temp_json_file:
- with open(temp_json_file, 'w') as f:
- test_helpers.WritePrettyJsonFile(bisect_test_contents, f)
-
- revision_to_add = 369411
-
- # Index of the tryjob in 'jobs' list.
- tryjob_index = 0
-
- args_output = test_helpers.ArgsOutputTest()
- args_output.options = None
-
- # Verify the exception is raised when the tryjob that is going to added
- # already exists in the status file (found its index).
- with self.assertRaises(ValueError) as err:
- modify_a_tryjob.PerformTryjobModification(
- revision_to_add, modify_a_tryjob.ModifyTryjob.ADD, temp_json_file,
- args_output.extra_change_lists, args_output.options,
- args_output.builders, args_output.chroot_path, args_output.verbose)
-
- self.assertEqual(
- str(err.exception), 'Tryjob already exists (index is %d) in %s.' %
- (tryjob_index, temp_json_file))
-
- mock_find_tryjob_index.assert_called_once()
-
- # Simulate the behavior of `FindTryjobIndex()` when the tryjob was not found.
- @mock.patch.object(update_tryjob_status, 'FindTryjobIndex', return_value=None)
- def testSuccessfullyDidNotAddTryjobOutsideOfBisectionBounds(
- self, mock_find_tryjob_index):
-
- bisect_test_contents = {
- 'start': 369410,
- 'end': 369420,
- 'jobs': [{
- 'rev': 369411,
- 'status': 'bad'
- }]
- }
-
- # Create a temporary .JSON file to simulate a .JSON file that has bisection
- # contents.
- with test_helpers.CreateTemporaryJsonFile() as temp_json_file:
- with open(temp_json_file, 'w') as f:
- test_helpers.WritePrettyJsonFile(bisect_test_contents, f)
-
- # Add a revision that is outside of 'start' and 'end'.
- revision_to_add = 369450
-
- args_output = test_helpers.ArgsOutputTest()
- args_output.options = None
-
- # Verify the exception is raised when adding a tryjob that does not exist
- # and is not within 'start' and 'end'.
- with self.assertRaises(ValueError) as err:
- modify_a_tryjob.PerformTryjobModification(
- revision_to_add, modify_a_tryjob.ModifyTryjob.ADD, temp_json_file,
- args_output.extra_change_lists, args_output.options,
- args_output.builders, args_output.chroot_path, args_output.verbose)
-
- self.assertEqual(
- str(err.exception), 'Failed to add tryjob to %s' % temp_json_file)
-
- mock_find_tryjob_index.assert_called_once()
-
- # Simulate the behavior of `AddTryjob()` when successfully submitted the
- # tryjob and constructed the tryjob information (a dictionary).
- @mock.patch.object(modify_a_tryjob, 'AddTryjob')
- # Simulate the behavior of `GetLLVMHashAndVersionFromSVNOption()` when
- # successfully retrieved the git hash of the revision to launch a tryjob for.
- @mock.patch.object(
- get_llvm_hash,
- 'GetLLVMHashAndVersionFromSVNOption',
- return_value=('a123testhash1', 369418))
- # Simulate the behavior of `FindTryjobIndex()` when the tryjob was not found.
- @mock.patch.object(update_tryjob_status, 'FindTryjobIndex', return_value=None)
- def testSuccessfullyAddedTryjob(self, mock_find_tryjob_index,
- mock_get_llvm_hash, mock_add_tryjob):
-
- bisect_test_contents = {
- 'start': 369410,
- 'end': 369420,
- 'jobs': [{
- 'rev': 369411,
- 'status': 'bad'
- }]
- }
-
- # Create a temporary .JSON file to simulate a .JSON file that has bisection
- # contents.
- with test_helpers.CreateTemporaryJsonFile() as temp_json_file:
- with open(temp_json_file, 'w') as f:
- test_helpers.WritePrettyJsonFile(bisect_test_contents, f)
-
- # Add a revision that is outside of 'start' and 'end'.
- revision_to_add = 369418
-
- args_output = test_helpers.ArgsOutputTest()
- args_output.options = None
-
- new_tryjob_info = {
- 'rev': revision_to_add,
- 'status': 'pending',
- 'options': args_output.options,
- 'extra_cls': args_output.extra_change_lists,
- 'builder': args_output.builders
- }
-
- mock_add_tryjob.return_value = new_tryjob_info
-
- modify_a_tryjob.PerformTryjobModification(
- revision_to_add, modify_a_tryjob.ModifyTryjob.ADD, temp_json_file,
- args_output.extra_change_lists, args_output.options,
- args_output.builders, args_output.chroot_path, args_output.verbose)
-
- # Verify that the tryjob was added to the status file.
- with open(temp_json_file) as status_file:
- bisect_contents = json.load(status_file)
-
- expected_file_contents = {
- 'start': 369410,
- 'end': 369420,
- 'jobs': [{
- 'rev': 369411,
- 'status': 'bad'
- }, new_tryjob_info]
+        # Create a temporary JSON file to simulate a status file with
+        # bisection contents.
+ with test_helpers.CreateTemporaryJsonFile() as temp_json_file:
+ with open(temp_json_file, "w") as f:
+ test_helpers.WritePrettyJsonFile(bisect_test_contents, f)
+
+ revision_to_modify = 369412
+
+ args_output = test_helpers.ArgsOutputTest()
+ args_output.builders = None
+ args_output.options = None
+
+ # Verify the exception is raised when the index of the tryjob was not
+ # found in the status file and the mode is not to 'add' a tryjob.
+ with self.assertRaises(ValueError) as err:
+ modify_a_tryjob.PerformTryjobModification(
+ revision_to_modify,
+ modify_a_tryjob.ModifyTryjob.REMOVE,
+ temp_json_file,
+ args_output.extra_change_lists,
+ args_output.options,
+ args_output.builders,
+ args_output.chroot_path,
+ args_output.verbose,
+ )
+
+ self.assertEqual(
+ str(err.exception),
+ "Unable to find tryjob for %d in %s"
+ % (revision_to_modify, temp_json_file),
+ )
+
+ mock_find_tryjob_index.assert_called_once()
+
+ # Simulate the behavior of `FindTryjobIndex()` when the index of the tryjob
+ # was found.
+ @mock.patch.object(update_tryjob_status, "FindTryjobIndex", return_value=0)
+ def testSuccessfullyRemovedTryjobInStatusFile(self, mock_find_tryjob_index):
+ bisect_test_contents = {
+ "start": 369410,
+ "end": 369420,
+ "jobs": [
+ {"rev": 369414, "status": "pending", "buildbucket_id": 1200}
+ ],
}
- self.assertDictEqual(bisect_contents, expected_file_contents)
-
- mock_find_tryjob_index.assert_called_once()
-
- mock_get_llvm_hash.assert_called_once_with(revision_to_add)
-
- mock_add_tryjob.assert_called_once()
-
- # Simulate the behavior of `FindTryjobIndex()` when the tryjob was found.
- @mock.patch.object(update_tryjob_status, 'FindTryjobIndex', return_value=0)
- def testModifyATryjobOptionDoesNotExist(self, mock_find_tryjob_index):
- bisect_test_contents = {
- 'start': 369410,
- 'end': 369420,
- 'jobs': [{
- 'rev': 369414,
- 'status': 'bad'
- }]
- }
-
- # Create a temporary .JSON file to simulate a .JSON file that has bisection
- # contents.
- with test_helpers.CreateTemporaryJsonFile() as temp_json_file:
- with open(temp_json_file, 'w') as f:
- test_helpers.WritePrettyJsonFile(bisect_test_contents, f)
-
- # Add a revision that is outside of 'start' and 'end'.
- revision_to_modify = 369414
-
- args_output = test_helpers.ArgsOutputTest()
- args_output.builders = None
- args_output.options = None
+        # Create a temporary JSON file to simulate a status file with
+        # bisection contents.
+ with test_helpers.CreateTemporaryJsonFile() as temp_json_file:
+ with open(temp_json_file, "w") as f:
+ test_helpers.WritePrettyJsonFile(bisect_test_contents, f)
+
+ revision_to_modify = 369414
+
+ args_output = test_helpers.ArgsOutputTest()
+ args_output.builders = None
+ args_output.options = None
+
+ modify_a_tryjob.PerformTryjobModification(
+ revision_to_modify,
+ modify_a_tryjob.ModifyTryjob.REMOVE,
+ temp_json_file,
+ args_output.extra_change_lists,
+ args_output.options,
+ args_output.builders,
+ args_output.chroot_path,
+ args_output.verbose,
+ )
+
+ # Verify that the tryjob was removed from the status file.
+ with open(temp_json_file) as status_file:
+ bisect_contents = json.load(status_file)
+
+ expected_file_contents = {
+ "start": 369410,
+ "end": 369420,
+ "jobs": [],
+ }
+
+ self.assertDictEqual(bisect_contents, expected_file_contents)
+
+ mock_find_tryjob_index.assert_called_once()
+
+    # Simulate the behavior of `RunTryJobs()` when it successfully submits a
+    # tryjob.
+ @mock.patch.object(update_packages_and_run_tests, "RunTryJobs")
+ # Simulate the behavior of `FindTryjobIndex()` when the index of the tryjob
+ # was found.
+ @mock.patch.object(update_tryjob_status, "FindTryjobIndex", return_value=0)
+ def testSuccessfullyRelaunchedTryjob(
+ self, mock_find_tryjob_index, mock_run_tryjob
+ ):
+
+ bisect_test_contents = {
+ "start": 369410,
+ "end": 369420,
+ "jobs": [
+ {
+ "rev": 369411,
+ "status": "bad",
+ "link": "https://some_tryjob_link.com",
+ "buildbucket_id": 1200,
+ "cl": 123,
+ "extra_cls": None,
+ "options": None,
+ "builder": ["some-builder-tryjob"],
+ }
+ ],
+ }
- # Verify the exception is raised when the modify a tryjob option does not
- # exist.
- with self.assertRaises(ValueError) as err:
- modify_a_tryjob.PerformTryjobModification(
- revision_to_modify, 'remove_link', temp_json_file,
- args_output.extra_change_lists, args_output.options,
- args_output.builders, args_output.chroot_path, args_output.verbose)
+ tryjob_result = [
+ {"link": "https://some_new_tryjob_link.com", "buildbucket_id": 20}
+ ]
+
+ mock_run_tryjob.return_value = tryjob_result
+
+        # Create a temporary JSON file to simulate a status file with
+        # bisection contents.
+ with test_helpers.CreateTemporaryJsonFile() as temp_json_file:
+ with open(temp_json_file, "w") as f:
+ test_helpers.WritePrettyJsonFile(bisect_test_contents, f)
+
+ revision_to_modify = 369411
+
+ args_output = test_helpers.ArgsOutputTest()
+ args_output.builders = None
+ args_output.options = None
+
+ modify_a_tryjob.PerformTryjobModification(
+ revision_to_modify,
+ modify_a_tryjob.ModifyTryjob.RELAUNCH,
+ temp_json_file,
+ args_output.extra_change_lists,
+ args_output.options,
+ args_output.builders,
+ args_output.chroot_path,
+ args_output.verbose,
+ )
+
+        # Verify that the tryjob's information was updated after submitting
+        # the tryjob.
+ with open(temp_json_file) as status_file:
+ bisect_contents = json.load(status_file)
+
+ expected_file_contents = {
+ "start": 369410,
+ "end": 369420,
+ "jobs": [
+ {
+ "rev": 369411,
+ "status": "pending",
+ "link": "https://some_new_tryjob_link.com",
+ "buildbucket_id": 20,
+ "cl": 123,
+ "extra_cls": None,
+ "options": None,
+ "builder": ["some-builder-tryjob"],
+ }
+ ],
+ }
+
+ self.assertDictEqual(bisect_contents, expected_file_contents)
+
+ mock_find_tryjob_index.assert_called_once()
+
+ mock_run_tryjob.assert_called_once()
+
+ # Simulate the behavior of `FindTryjobIndex()` when the index of the tryjob
+ # was found.
+ @mock.patch.object(update_tryjob_status, "FindTryjobIndex", return_value=0)
+ def testAddingTryjobThatAlreadyExists(self, mock_find_tryjob_index):
+ bisect_test_contents = {
+ "start": 369410,
+ "end": 369420,
+ "jobs": [
+ {"rev": 369411, "status": "bad", "builder": ["some-builder"]}
+ ],
+ }
- self.assertEqual(
- str(err.exception),
- 'Invalid "modify_tryjob" option provided: remove_link')
+        # Create a temporary JSON file to simulate a status file with
+        # bisection contents.
+ with test_helpers.CreateTemporaryJsonFile() as temp_json_file:
+ with open(temp_json_file, "w") as f:
+ test_helpers.WritePrettyJsonFile(bisect_test_contents, f)
+
+ revision_to_add = 369411
+
+ # Index of the tryjob in 'jobs' list.
+ tryjob_index = 0
+
+ args_output = test_helpers.ArgsOutputTest()
+ args_output.options = None
+
+        # Verify the exception is raised when the tryjob that is going to be
+        # added already exists in the status file (its index was found).
+ with self.assertRaises(ValueError) as err:
+ modify_a_tryjob.PerformTryjobModification(
+ revision_to_add,
+ modify_a_tryjob.ModifyTryjob.ADD,
+ temp_json_file,
+ args_output.extra_change_lists,
+ args_output.options,
+ args_output.builders,
+ args_output.chroot_path,
+ args_output.verbose,
+ )
+
+ self.assertEqual(
+ str(err.exception),
+ "Tryjob already exists (index is %d) in %s."
+ % (tryjob_index, temp_json_file),
+ )
+
+ mock_find_tryjob_index.assert_called_once()
+
+ # Simulate the behavior of `FindTryjobIndex()` when the tryjob was not found.
+ @mock.patch.object(
+ update_tryjob_status, "FindTryjobIndex", return_value=None
+ )
+ def testSuccessfullyDidNotAddTryjobOutsideOfBisectionBounds(
+ self, mock_find_tryjob_index
+ ):
+
+ bisect_test_contents = {
+ "start": 369410,
+ "end": 369420,
+ "jobs": [{"rev": 369411, "status": "bad"}],
+ }
- mock_find_tryjob_index.assert_called_once()
+        # Create a temporary JSON file to simulate a status file with
+        # bisection contents.
+ with test_helpers.CreateTemporaryJsonFile() as temp_json_file:
+ with open(temp_json_file, "w") as f:
+ test_helpers.WritePrettyJsonFile(bisect_test_contents, f)
+
+ # Add a revision that is outside of 'start' and 'end'.
+ revision_to_add = 369450
+
+ args_output = test_helpers.ArgsOutputTest()
+ args_output.options = None
+
+ # Verify the exception is raised when adding a tryjob that does not exist
+ # and is not within 'start' and 'end'.
+ with self.assertRaises(ValueError) as err:
+ modify_a_tryjob.PerformTryjobModification(
+ revision_to_add,
+ modify_a_tryjob.ModifyTryjob.ADD,
+ temp_json_file,
+ args_output.extra_change_lists,
+ args_output.options,
+ args_output.builders,
+ args_output.chroot_path,
+ args_output.verbose,
+ )
+
+ self.assertEqual(
+ str(err.exception),
+ "Failed to add tryjob to %s" % temp_json_file,
+ )
+
+ mock_find_tryjob_index.assert_called_once()
+
+    # Simulate the behavior of `AddTryjob()` when it successfully submits the
+    # tryjob and constructs the tryjob information (a dictionary).
+ @mock.patch.object(modify_a_tryjob, "AddTryjob")
+    # Simulate the behavior of `GetLLVMHashAndVersionFromSVNOption()` when it
+    # successfully retrieves the git hash of the revision to launch a tryjob
+    # for.
+ @mock.patch.object(
+ get_llvm_hash,
+ "GetLLVMHashAndVersionFromSVNOption",
+ return_value=("a123testhash1", 369418),
+ )
+ # Simulate the behavior of `FindTryjobIndex()` when the tryjob was not found.
+ @mock.patch.object(
+ update_tryjob_status, "FindTryjobIndex", return_value=None
+ )
+ def testSuccessfullyAddedTryjob(
+ self, mock_find_tryjob_index, mock_get_llvm_hash, mock_add_tryjob
+ ):
+
+ bisect_test_contents = {
+ "start": 369410,
+ "end": 369420,
+ "jobs": [{"rev": 369411, "status": "bad"}],
+ }
+        # Create a temporary JSON file to simulate a status file with
+        # bisection contents.
+ with test_helpers.CreateTemporaryJsonFile() as temp_json_file:
+ with open(temp_json_file, "w") as f:
+ test_helpers.WritePrettyJsonFile(bisect_test_contents, f)
+
+        # Add a revision that is within 'start' and 'end'.
+ revision_to_add = 369418
+
+ args_output = test_helpers.ArgsOutputTest()
+ args_output.options = None
+
+ new_tryjob_info = {
+ "rev": revision_to_add,
+ "status": "pending",
+ "options": args_output.options,
+ "extra_cls": args_output.extra_change_lists,
+ "builder": args_output.builders,
+ }
+
+ mock_add_tryjob.return_value = new_tryjob_info
+
+ modify_a_tryjob.PerformTryjobModification(
+ revision_to_add,
+ modify_a_tryjob.ModifyTryjob.ADD,
+ temp_json_file,
+ args_output.extra_change_lists,
+ args_output.options,
+ args_output.builders,
+ args_output.chroot_path,
+ args_output.verbose,
+ )
+
+ # Verify that the tryjob was added to the status file.
+ with open(temp_json_file) as status_file:
+ bisect_contents = json.load(status_file)
+
+ expected_file_contents = {
+ "start": 369410,
+ "end": 369420,
+ "jobs": [{"rev": 369411, "status": "bad"}, new_tryjob_info],
+ }
+
+ self.assertDictEqual(bisect_contents, expected_file_contents)
+
+ mock_find_tryjob_index.assert_called_once()
+
+ mock_get_llvm_hash.assert_called_once_with(revision_to_add)
+
+ mock_add_tryjob.assert_called_once()
+
+ # Simulate the behavior of `FindTryjobIndex()` when the tryjob was found.
+ @mock.patch.object(update_tryjob_status, "FindTryjobIndex", return_value=0)
+ def testModifyATryjobOptionDoesNotExist(self, mock_find_tryjob_index):
+ bisect_test_contents = {
+ "start": 369410,
+ "end": 369420,
+ "jobs": [{"rev": 369414, "status": "bad"}],
+ }
-if __name__ == '__main__':
- unittest.main()
+        # Create a temporary JSON file to simulate a status file with
+        # bisection contents.
+ with test_helpers.CreateTemporaryJsonFile() as temp_json_file:
+ with open(temp_json_file, "w") as f:
+ test_helpers.WritePrettyJsonFile(bisect_test_contents, f)
+
+        # Use a revision that is within 'start' and 'end'.
+ revision_to_modify = 369414
+
+ args_output = test_helpers.ArgsOutputTest()
+ args_output.builders = None
+ args_output.options = None
+
+        # Verify the exception is raised when an invalid 'modify_tryjob'
+        # option is provided.
+ with self.assertRaises(ValueError) as err:
+ modify_a_tryjob.PerformTryjobModification(
+ revision_to_modify,
+ "remove_link",
+ temp_json_file,
+ args_output.extra_change_lists,
+ args_output.options,
+ args_output.builders,
+ args_output.chroot_path,
+ args_output.verbose,
+ )
+
+ self.assertEqual(
+ str(err.exception),
+ 'Invalid "modify_tryjob" option provided: remove_link',
+ )
+
+ mock_find_tryjob_index.assert_called_once()
+
+
+if __name__ == "__main__":
+ unittest.main()
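
One detail of the tests above worth spelling out: stacked `@mock.patch.object` decorators apply bottom-up, so the decorator closest to the test method supplies the first mock argument after `self` (compare the decorator order and parameter order on `testSuccessfullyAddedTryjob`). A minimal, self-contained sketch of that ordering; `helpers` is a hypothetical stand-in, not part of these tools:

    import types
    import unittest
    from unittest import mock

    # Hypothetical object standing in for a patched module.
    helpers = types.SimpleNamespace(fetch=lambda: "real", parse=lambda: "real")

    class OrderingTest(unittest.TestCase):
        @mock.patch.object(helpers, "fetch", return_value="fake fetch")
        @mock.patch.object(helpers, "parse", return_value="fake parse")
        def test_bottom_up_order(self, mock_parse, mock_fetch):
            # The bottom decorator (for `parse`) maps to the first parameter.
            self.assertEqual(helpers.parse(), "fake parse")
            self.assertEqual(helpers.fetch(), "fake fetch")
            mock_parse.assert_called_once()
            mock_fetch.assert_called_once()

    if __name__ == "__main__":
        unittest.main()
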
diff --git a/llvm_tools/nightly_revert_checker.py b/llvm_tools/nightly_revert_checker.py
index 89485088..d12464a6 100755
--- a/llvm_tools/nightly_revert_checker.py
+++ b/llvm_tools/nightly_revert_checker.py
@@ -1,6 +1,6 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
-# Copyright 2020 The Chromium OS Authors. All rights reserved.
+# Copyright 2020 The ChromiumOS Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
@@ -10,7 +10,6 @@ If any reverts are found that were previously unknown, this cherry-picks them or
fires off an email. All LLVM SHAs to monitor are autodetected.
"""
-from __future__ import print_function
import argparse
import io
@@ -24,383 +23,462 @@ import typing as t
import cros_utils.email_sender as email_sender
import cros_utils.tiny_render as tiny_render
-
import get_llvm_hash
import get_upstream_patch
import git_llvm_rev
import revert_checker
+
State = t.Any
-def _find_interesting_android_shas(android_llvm_toolchain_dir: str
- ) -> t.List[t.Tuple[str, str]]:
- llvm_project = os.path.join(android_llvm_toolchain_dir,
- 'toolchain/llvm-project')
-
- def get_llvm_merge_base(branch: str) -> str:
- head_sha = subprocess.check_output(
- ['git', 'rev-parse', branch],
- cwd=llvm_project,
- encoding='utf-8',
- ).strip()
- merge_base = subprocess.check_output(
- ['git', 'merge-base', branch, 'aosp/upstream-main'],
- cwd=llvm_project,
- encoding='utf-8',
- ).strip()
- logging.info('Merge-base for %s (HEAD == %s) and upstream-main is %s',
- branch, head_sha, merge_base)
- return merge_base
-
- main_legacy = get_llvm_merge_base('aosp/master-legacy') # nocheck
- testing_upstream = get_llvm_merge_base('aosp/testing-upstream')
- result = [('main-legacy', main_legacy)]
-
- # If these are the same SHA, there's no point in tracking both.
- if main_legacy != testing_upstream:
- result.append(('testing-upstream', testing_upstream))
- else:
- logging.info('main-legacy and testing-upstream are identical; ignoring '
- 'the latter.')
- return result
-
-
-def _parse_llvm_ebuild_for_shas(ebuild_file: io.TextIOWrapper
- ) -> t.List[t.Tuple[str, str]]:
- def parse_ebuild_assignment(line: str) -> str:
- no_comments = line.split('#')[0]
- no_assign = no_comments.split('=', 1)[1].strip()
- assert no_assign.startswith('"') and no_assign.endswith('"'), no_assign
- return no_assign[1:-1]
-
- llvm_hash, llvm_next_hash = None, None
- for line in ebuild_file:
- if line.startswith('LLVM_HASH='):
- llvm_hash = parse_ebuild_assignment(line)
- if llvm_next_hash:
- break
- if line.startswith('LLVM_NEXT_HASH'):
- llvm_next_hash = parse_ebuild_assignment(line)
- if llvm_hash:
- break
- if not llvm_next_hash or not llvm_hash:
- raise ValueError('Failed to detect SHAs for llvm/llvm_next. Got: '
- 'llvm=%s; llvm_next=%s' % (llvm_hash, llvm_next_hash))
-
- results = [('llvm', llvm_hash)]
- if llvm_next_hash != llvm_hash:
- results.append(('llvm-next', llvm_next_hash))
- return results
-
-
-def _find_interesting_chromeos_shas(chromeos_base: str
- ) -> t.List[t.Tuple[str, str]]:
- llvm_dir = os.path.join(chromeos_base,
- 'src/third_party/chromiumos-overlay/sys-devel/llvm')
- candidate_ebuilds = [
- os.path.join(llvm_dir, x) for x in os.listdir(llvm_dir)
- if '_pre' in x and not os.path.islink(os.path.join(llvm_dir, x))
- ]
-
- if len(candidate_ebuilds) != 1:
- raise ValueError('Expected exactly one llvm ebuild candidate; got %s' %
- pprint.pformat(candidate_ebuilds))
-
- with open(candidate_ebuilds[0], encoding='utf-8') as f:
- return _parse_llvm_ebuild_for_shas(f)
-
-
-_Email = t.NamedTuple('_Email', [
- ('subject', str),
- ('body', tiny_render.Piece),
-])
+def _find_interesting_android_shas(
+ android_llvm_toolchain_dir: str,
+) -> t.List[t.Tuple[str, str]]:
+ llvm_project = os.path.join(
+ android_llvm_toolchain_dir, "toolchain/llvm-project"
+ )
+
+ def get_llvm_merge_base(branch: str) -> str:
+ head_sha = subprocess.check_output(
+ ["git", "rev-parse", branch],
+ cwd=llvm_project,
+ encoding="utf-8",
+ ).strip()
+ merge_base = subprocess.check_output(
+ ["git", "merge-base", branch, "aosp/upstream-main"],
+ cwd=llvm_project,
+ encoding="utf-8",
+ ).strip()
+ logging.info(
+ "Merge-base for %s (HEAD == %s) and upstream-main is %s",
+ branch,
+ head_sha,
+ merge_base,
+ )
+ return merge_base
+
+ main_legacy = get_llvm_merge_base("aosp/master-legacy") # nocheck
+ testing_upstream = get_llvm_merge_base("aosp/testing-upstream")
+ result = [("main-legacy", main_legacy)]
+
+ # If these are the same SHA, there's no point in tracking both.
+ if main_legacy != testing_upstream:
+ result.append(("testing-upstream", testing_upstream))
+ else:
+ logging.info(
+ "main-legacy and testing-upstream are identical; ignoring "
+ "the latter."
+ )
+ return result
+
+
+def _parse_llvm_ebuild_for_shas(
+ ebuild_file: io.TextIOWrapper,
+) -> t.List[t.Tuple[str, str]]:
+ def parse_ebuild_assignment(line: str) -> str:
+ no_comments = line.split("#")[0]
+ no_assign = no_comments.split("=", 1)[1].strip()
+ assert no_assign.startswith('"') and no_assign.endswith('"'), no_assign
+ return no_assign[1:-1]
+
+ llvm_hash, llvm_next_hash = None, None
+ for line in ebuild_file:
+ if line.startswith("LLVM_HASH="):
+ llvm_hash = parse_ebuild_assignment(line)
+ if llvm_next_hash:
+ break
+ if line.startswith("LLVM_NEXT_HASH"):
+ llvm_next_hash = parse_ebuild_assignment(line)
+ if llvm_hash:
+ break
+ if not llvm_next_hash or not llvm_hash:
+ raise ValueError(
+ "Failed to detect SHAs for llvm/llvm_next. Got: "
+ "llvm=%s; llvm_next=%s" % (llvm_hash, llvm_next_hash)
+ )
+
+ results = [("llvm", llvm_hash)]
+ if llvm_next_hash != llvm_hash:
+ results.append(("llvm-next", llvm_next_hash))
+ return results
+
+
+def _find_interesting_chromeos_shas(
+ chromeos_base: str,
+) -> t.List[t.Tuple[str, str]]:
+ llvm_dir = os.path.join(
+ chromeos_base, "src/third_party/chromiumos-overlay/sys-devel/llvm"
+ )
+ candidate_ebuilds = [
+ os.path.join(llvm_dir, x)
+ for x in os.listdir(llvm_dir)
+ if "_pre" in x and not os.path.islink(os.path.join(llvm_dir, x))
+ ]
+
+ if len(candidate_ebuilds) != 1:
+ raise ValueError(
+ "Expected exactly one llvm ebuild candidate; got %s"
+ % pprint.pformat(candidate_ebuilds)
+ )
+
+ with open(candidate_ebuilds[0], encoding="utf-8") as f:
+ return _parse_llvm_ebuild_for_shas(f)
+
+
+_Email = t.NamedTuple(
+ "_Email",
+ [
+ ("subject", str),
+ ("body", tiny_render.Piece),
+ ],
+)
def _generate_revert_email(
- repository_name: str, friendly_name: str, sha: str,
+ repository_name: str,
+ friendly_name: str,
+ sha: str,
prettify_sha: t.Callable[[str], tiny_render.Piece],
get_sha_description: t.Callable[[str], tiny_render.Piece],
- new_reverts: t.List[revert_checker.Revert]) -> _Email:
- email_pieces = [
- 'It looks like there may be %s across %s (' % (
- 'a new revert' if len(new_reverts) == 1 else 'new reverts',
- friendly_name,
- ),
- prettify_sha(sha),
- ').',
- tiny_render.line_break,
- tiny_render.line_break,
- 'That is:' if len(new_reverts) == 1 else 'These are:',
- ]
-
- revert_listing = []
- for revert in sorted(new_reverts, key=lambda r: r.sha):
- revert_listing.append([
- prettify_sha(revert.sha),
- ' (appears to revert ',
- prettify_sha(revert.reverted_sha),
- '): ',
- get_sha_description(revert.sha),
- ])
-
- email_pieces.append(tiny_render.UnorderedList(items=revert_listing))
- email_pieces += [
- tiny_render.line_break,
- 'PTAL and consider reverting them locally.',
- ]
- return _Email(
- subject='[revert-checker/%s] new %s discovered across %s' % (
- repository_name,
- 'revert' if len(new_reverts) == 1 else 'reverts',
- friendly_name,
- ),
- body=email_pieces,
- )
+ new_reverts: t.List[revert_checker.Revert],
+) -> _Email:
+ email_pieces = [
+ "It looks like there may be %s across %s ("
+ % (
+ "a new revert" if len(new_reverts) == 1 else "new reverts",
+ friendly_name,
+ ),
+ prettify_sha(sha),
+ ").",
+ tiny_render.line_break,
+ tiny_render.line_break,
+ "That is:" if len(new_reverts) == 1 else "These are:",
+ ]
+
+ revert_listing = []
+ for revert in sorted(new_reverts, key=lambda r: r.sha):
+ revert_listing.append(
+ [
+ prettify_sha(revert.sha),
+ " (appears to revert ",
+ prettify_sha(revert.reverted_sha),
+ "): ",
+ get_sha_description(revert.sha),
+ ]
+ )
+
+ email_pieces.append(tiny_render.UnorderedList(items=revert_listing))
+ email_pieces += [
+ tiny_render.line_break,
+ "PTAL and consider reverting them locally.",
+ ]
+ return _Email(
+ subject="[revert-checker/%s] new %s discovered across %s"
+ % (
+ repository_name,
+ "revert" if len(new_reverts) == 1 else "reverts",
+ friendly_name,
+ ),
+ body=email_pieces,
+ )
_EmailRecipients = t.NamedTuple(
- '_EmailRecipients',
+ "_EmailRecipients",
[
- ('well_known', t.List[str]),
- ('direct', t.List[str]),
+ ("well_known", t.List[str]),
+ ("direct", t.List[str]),
],
)
def _send_revert_email(recipients: _EmailRecipients, email: _Email) -> None:
- email_sender.EmailSender().SendX20Email(
- subject=email.subject,
- identifier='revert-checker',
- well_known_recipients=recipients.well_known,
- direct_recipients=['gbiv@google.com'] + recipients.direct,
- text_body=tiny_render.render_text_pieces(email.body),
- html_body=tiny_render.render_html_pieces(email.body),
- )
+ email_sender.EmailSender().SendX20Email(
+ subject=email.subject,
+ identifier="revert-checker",
+ well_known_recipients=recipients.well_known,
+ direct_recipients=["gbiv@google.com"] + recipients.direct,
+ text_body=tiny_render.render_text_pieces(email.body),
+ html_body=tiny_render.render_html_pieces(email.body),
+ )
def _write_state(state_file: str, new_state: State) -> None:
- try:
- tmp_file = state_file + '.new'
- with open(tmp_file, 'w', encoding='utf-8') as f:
- json.dump(new_state, f, sort_keys=True, indent=2, separators=(',', ': '))
- os.rename(tmp_file, state_file)
- except:
try:
- os.remove(tmp_file)
- except FileNotFoundError:
- pass
- raise
+ tmp_file = state_file + ".new"
+ with open(tmp_file, "w", encoding="utf-8") as f:
+ json.dump(
+ new_state, f, sort_keys=True, indent=2, separators=(",", ": ")
+ )
+ os.rename(tmp_file, state_file)
+ except:
+ try:
+ os.remove(tmp_file)
+ except FileNotFoundError:
+ pass
+ raise
def _read_state(state_file: str) -> State:
- try:
- with open(state_file) as f:
- return json.load(f)
- except FileNotFoundError:
- logging.info('No state file found at %r; starting with an empty slate',
- state_file)
- return {}
-
-
-def find_shas(llvm_dir: str, interesting_shas: t.List[t.Tuple[str, str]],
- state: State, new_state: State):
- for friendly_name, sha in interesting_shas:
- logging.info('Finding reverts across %s (%s)', friendly_name, sha)
- all_reverts = revert_checker.find_reverts(llvm_dir,
- sha,
- root='origin/' +
- git_llvm_rev.MAIN_BRANCH)
- logging.info('Detected the following revert(s) across %s:\n%s',
- friendly_name, pprint.pformat(all_reverts))
-
- new_state[sha] = [r.sha for r in all_reverts]
-
- if sha not in state:
- logging.info('SHA %s is new to me', sha)
- existing_reverts = set()
- else:
- existing_reverts = set(state[sha])
-
- new_reverts = [r for r in all_reverts if r.sha not in existing_reverts]
- if not new_reverts:
- logging.info('...All of which have been reported.')
- continue
-
- yield (friendly_name, sha, new_reverts)
-
-
-def do_cherrypick(chroot_path: str, llvm_dir: str,
- interesting_shas: t.List[t.Tuple[str, str]], state: State,
- reviewers: t.List[str], cc: t.List[str]) -> State:
- new_state: State = {}
- seen: t.Set[str] = set()
- for friendly_name, _sha, reverts in find_shas(llvm_dir, interesting_shas,
- state, new_state):
- if friendly_name in seen:
- continue
- seen.add(friendly_name)
- for sha, reverted_sha in reverts:
- try:
- # We upload reverts for all platforms by default, since there's no
- # real reason for them to be CrOS-specific.
- get_upstream_patch.get_from_upstream(chroot_path=chroot_path,
- create_cl=True,
- start_sha=reverted_sha,
- patches=[sha],
- reviewers=reviewers,
- cc=cc,
- platforms=())
- except get_upstream_patch.CherrypickError as e:
- logging.info('%s, skipping...', str(e))
- return new_state
-
-
-def do_email(is_dry_run: bool, llvm_dir: str, repository: str,
- interesting_shas: t.List[t.Tuple[str, str]], state: State,
- recipients: _EmailRecipients) -> State:
- def prettify_sha(sha: str) -> tiny_render.Piece:
- rev = get_llvm_hash.GetVersionFrom(llvm_dir, sha)
-
- # 12 is arbitrary, but should be unambiguous enough.
- short_sha = sha[:12]
- return tiny_render.Switch(
- text=f'r{rev} ({short_sha})',
- html=tiny_render.Link(href='https://reviews.llvm.org/rG' + sha,
- inner='r' + str(rev)),
+ try:
+ with open(state_file) as f:
+ return json.load(f)
+ except FileNotFoundError:
+ logging.info(
+ "No state file found at %r; starting with an empty slate",
+ state_file,
+ )
+ return {}
+
+
+def find_shas(
+ llvm_dir: str,
+ interesting_shas: t.List[t.Tuple[str, str]],
+ state: State,
+ new_state: State,
+):
+ for friendly_name, sha in interesting_shas:
+ logging.info("Finding reverts across %s (%s)", friendly_name, sha)
+ all_reverts = revert_checker.find_reverts(
+ llvm_dir, sha, root="origin/" + git_llvm_rev.MAIN_BRANCH
+ )
+ logging.info(
+ "Detected the following revert(s) across %s:\n%s",
+ friendly_name,
+ pprint.pformat(all_reverts),
+ )
+
+ new_state[sha] = [r.sha for r in all_reverts]
+
+ if sha not in state:
+ logging.info("SHA %s is new to me", sha)
+ existing_reverts = set()
+ else:
+ existing_reverts = set(state[sha])
+
+ new_reverts = [r for r in all_reverts if r.sha not in existing_reverts]
+ if not new_reverts:
+ logging.info("...All of which have been reported.")
+ continue
+
+ yield (friendly_name, sha, new_reverts)
+
+
+def do_cherrypick(
+ chroot_path: str,
+ llvm_dir: str,
+ interesting_shas: t.List[t.Tuple[str, str]],
+ state: State,
+ reviewers: t.List[str],
+ cc: t.List[str],
+) -> State:
+ new_state: State = {}
+ seen: t.Set[str] = set()
+ for friendly_name, _sha, reverts in find_shas(
+ llvm_dir, interesting_shas, state, new_state
+ ):
+ if friendly_name in seen:
+ continue
+ seen.add(friendly_name)
+ for sha, reverted_sha in reverts:
+ try:
+ # We upload reverts for all platforms by default, since there's no
+ # real reason for them to be CrOS-specific.
+ get_upstream_patch.get_from_upstream(
+ chroot_path=chroot_path,
+ create_cl=True,
+ start_sha=reverted_sha,
+ patches=[sha],
+ reviewers=reviewers,
+ cc=cc,
+ platforms=(),
+ )
+ except get_upstream_patch.CherrypickError as e:
+ logging.info("%s, skipping...", str(e))
+ return new_state
+
+
+def do_email(
+ is_dry_run: bool,
+ llvm_dir: str,
+ repository: str,
+ interesting_shas: t.List[t.Tuple[str, str]],
+ state: State,
+ recipients: _EmailRecipients,
+) -> State:
+ def prettify_sha(sha: str) -> tiny_render.Piece:
+ rev = get_llvm_hash.GetVersionFrom(llvm_dir, sha)
+
+ # 12 is arbitrary, but should be unambiguous enough.
+ short_sha = sha[:12]
+ return tiny_render.Switch(
+ text=f"r{rev} ({short_sha})",
+ html=tiny_render.Link(
+ href="https://reviews.llvm.org/rG" + sha, inner="r" + str(rev)
+ ),
+ )
+
+ def get_sha_description(sha: str) -> tiny_render.Piece:
+ return subprocess.check_output(
+ ["git", "log", "-n1", "--format=%s", sha],
+ cwd=llvm_dir,
+ encoding="utf-8",
+ ).strip()
+
+ new_state: State = {}
+ for friendly_name, sha, new_reverts in find_shas(
+ llvm_dir, interesting_shas, state, new_state
+ ):
+ email = _generate_revert_email(
+ repository,
+ friendly_name,
+ sha,
+ prettify_sha,
+ get_sha_description,
+ new_reverts,
+ )
+ if is_dry_run:
+ logging.info(
+ "Would send email:\nSubject: %s\nBody:\n%s\n",
+ email.subject,
+ tiny_render.render_text_pieces(email.body),
+ )
+ else:
+ logging.info("Sending email with subject %r...", email.subject)
+ _send_revert_email(recipients, email)
+ logging.info("Email sent.")
+ return new_state
+
+
+def parse_args(argv: t.List[str]) -> t.Any:
+ parser = argparse.ArgumentParser(
+ description=__doc__,
+ formatter_class=argparse.RawDescriptionHelpFormatter,
+ )
+ parser.add_argument(
+ "action",
+ choices=["cherry-pick", "email", "dry-run"],
+ help="Automatically cherry-pick upstream reverts, send an email, or "
+ "write to stdout.",
+ )
+ parser.add_argument(
+ "--state_file", required=True, help="File to store persistent state in."
+ )
+ parser.add_argument(
+ "--llvm_dir", required=True, help="Up-to-date LLVM directory to use."
+ )
+ parser.add_argument("--debug", action="store_true")
+ parser.add_argument(
+ "--reviewers",
+ type=str,
+ nargs="*",
+ help="Requests reviews from REVIEWERS. All REVIEWERS must have existing "
+ "accounts.",
+ )
+ parser.add_argument(
+ "--cc",
+ type=str,
+ nargs="*",
+ help="CCs the CL to the recipients. All recipients must have existing "
+ "accounts.",
)
- def get_sha_description(sha: str) -> tiny_render.Piece:
- return subprocess.check_output(
- ['git', 'log', '-n1', '--format=%s', sha],
- cwd=llvm_dir,
- encoding='utf-8',
- ).strip()
-
- new_state: State = {}
- for friendly_name, sha, new_reverts in find_shas(llvm_dir, interesting_shas,
- state, new_state):
- email = _generate_revert_email(repository, friendly_name, sha,
- prettify_sha, get_sha_description,
- new_reverts)
- if is_dry_run:
- logging.info('Would send email:\nSubject: %s\nBody:\n%s\n',
- email.subject, tiny_render.render_text_pieces(email.body))
- else:
- logging.info('Sending email with subject %r...', email.subject)
- _send_revert_email(recipients, email)
- logging.info('Email sent.')
- return new_state
+ subparsers = parser.add_subparsers(dest="repository")
+ subparsers.required = True
+ chromeos_subparser = subparsers.add_parser("chromeos")
+ chromeos_subparser.add_argument(
+ "--chromeos_dir",
+ required=True,
+ help="Up-to-date CrOS directory to use.",
+ )
-def parse_args(argv: t.List[str]) -> t.Any:
- parser = argparse.ArgumentParser(
- description=__doc__,
- formatter_class=argparse.RawDescriptionHelpFormatter)
- parser.add_argument(
- 'action',
- choices=['cherry-pick', 'email', 'dry-run'],
- help='Automatically cherry-pick upstream reverts, send an email, or '
- 'write to stdout.')
- parser.add_argument('--state_file',
- required=True,
- help='File to store persistent state in.')
- parser.add_argument('--llvm_dir',
- required=True,
- help='Up-to-date LLVM directory to use.')
- parser.add_argument('--debug', action='store_true')
- parser.add_argument(
- '--reviewers',
- type=str,
- nargs='*',
- help='Requests reviews from REVIEWERS. All REVIEWERS must have existing '
- 'accounts.')
- parser.add_argument(
- '--cc',
- type=str,
- nargs='*',
- help='CCs the CL to the recipients. All recipients must have existing '
- 'accounts.')
-
- subparsers = parser.add_subparsers(dest='repository')
- subparsers.required = True
-
- chromeos_subparser = subparsers.add_parser('chromeos')
- chromeos_subparser.add_argument('--chromeos_dir',
- required=True,
- help='Up-to-date CrOS directory to use.')
-
- android_subparser = subparsers.add_parser('android')
- android_subparser.add_argument(
- '--android_llvm_toolchain_dir',
- required=True,
- help='Up-to-date android-llvm-toolchain directory to use.')
-
- return parser.parse_args(argv)
-
-
-def find_chroot(opts: t.Any, reviewers: t.List[str], cc: t.List[str]
- ) -> t.Tuple[str, t.List[t.Tuple[str, str]], _EmailRecipients]:
- recipients = reviewers + cc
- if opts.repository == 'chromeos':
- chroot_path = opts.chromeos_dir
- return (chroot_path, _find_interesting_chromeos_shas(chroot_path),
- _EmailRecipients(well_known=['mage'], direct=recipients))
- elif opts.repository == 'android':
- if opts.action == 'cherry-pick':
- raise RuntimeError(
- "android doesn't currently support automatic cherry-picking.")
-
- chroot_path = opts.android_llvm_toolchain_dir
- return (chroot_path, _find_interesting_android_shas(chroot_path),
- _EmailRecipients(well_known=[],
- direct=['android-llvm-dev@google.com'] +
- recipients))
- else:
- raise ValueError(f'Unknown repository {opts.repository}')
+ android_subparser = subparsers.add_parser("android")
+ android_subparser.add_argument(
+ "--android_llvm_toolchain_dir",
+ required=True,
+ help="Up-to-date android-llvm-toolchain directory to use.",
+ )
+
+ return parser.parse_args(argv)
+
+
+def find_chroot(
+ opts: t.Any, reviewers: t.List[str], cc: t.List[str]
+) -> t.Tuple[str, t.List[t.Tuple[str, str]], _EmailRecipients]:
+ recipients = reviewers + cc
+ if opts.repository == "chromeos":
+ chroot_path = opts.chromeos_dir
+ return (
+ chroot_path,
+ _find_interesting_chromeos_shas(chroot_path),
+ _EmailRecipients(well_known=["mage"], direct=recipients),
+ )
+ elif opts.repository == "android":
+ if opts.action == "cherry-pick":
+ raise RuntimeError(
+ "android doesn't currently support automatic cherry-picking."
+ )
+
+ chroot_path = opts.android_llvm_toolchain_dir
+ return (
+ chroot_path,
+ _find_interesting_android_shas(chroot_path),
+ _EmailRecipients(
+ well_known=[],
+ direct=["android-llvm-dev@google.com"] + recipients,
+ ),
+ )
+ else:
+ raise ValueError(f"Unknown repository {opts.repository}")
def main(argv: t.List[str]) -> int:
- opts = parse_args(argv)
-
- logging.basicConfig(
- format='%(asctime)s: %(levelname)s: %(filename)s:%(lineno)d: %(message)s',
- level=logging.DEBUG if opts.debug else logging.INFO,
- )
-
- action = opts.action
- llvm_dir = opts.llvm_dir
- repository = opts.repository
- state_file = opts.state_file
- reviewers = opts.reviewers if opts.reviewers else []
- cc = opts.cc if opts.cc else []
-
- chroot_path, interesting_shas, recipients = find_chroot(opts, reviewers, cc)
- logging.info('Interesting SHAs were %r', interesting_shas)
-
- state = _read_state(state_file)
- logging.info('Loaded state\n%s', pprint.pformat(state))
-
- # We want to be as free of obvious side-effects as possible in case something
- # above breaks. Hence, action as late as possible.
- if action == 'cherry-pick':
- new_state = do_cherrypick(chroot_path=chroot_path,
- llvm_dir=llvm_dir,
- interesting_shas=interesting_shas,
- state=state,
- reviewers=reviewers,
- cc=cc)
- else:
- new_state = do_email(is_dry_run=action == 'dry-run',
- llvm_dir=llvm_dir,
- repository=repository,
- interesting_shas=interesting_shas,
- state=state,
- recipients=recipients)
-
- _write_state(state_file, new_state)
- return 0
-
-
-if __name__ == '__main__':
- sys.exit(main(sys.argv[1:]))
+ opts = parse_args(argv)
+
+ logging.basicConfig(
+ format="%(asctime)s: %(levelname)s: %(filename)s:%(lineno)d: %(message)s",
+ level=logging.DEBUG if opts.debug else logging.INFO,
+ )
+
+ action = opts.action
+ llvm_dir = opts.llvm_dir
+ repository = opts.repository
+ state_file = opts.state_file
+ reviewers = opts.reviewers if opts.reviewers else []
+ cc = opts.cc if opts.cc else []
+
+ chroot_path, interesting_shas, recipients = find_chroot(opts, reviewers, cc)
+ logging.info("Interesting SHAs were %r", interesting_shas)
+
+ state = _read_state(state_file)
+ logging.info("Loaded state\n%s", pprint.pformat(state))
+
+    # We want to be as free of obvious side effects as possible in case
+    # something above breaks. Hence, we take action as late as possible.
+ if action == "cherry-pick":
+ new_state = do_cherrypick(
+ chroot_path=chroot_path,
+ llvm_dir=llvm_dir,
+ interesting_shas=interesting_shas,
+ state=state,
+ reviewers=reviewers,
+ cc=cc,
+ )
+ else:
+ new_state = do_email(
+ is_dry_run=action == "dry-run",
+ llvm_dir=llvm_dir,
+ repository=repository,
+ interesting_shas=interesting_shas,
+ state=state,
+ recipients=recipients,
+ )
+
+ _write_state(state_file, new_state)
+ return 0
+
+
+if __name__ == "__main__":
+ sys.exit(main(sys.argv[1:]))
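
`_write_state` above is the classic write-then-rename pattern: the new contents go to a sibling `.new` file first, and `os.rename` then replaces the destination atomically (on POSIX, within one filesystem), so a crash mid-write leaves the previous state intact and readers never see a half-written JSON file. A minimal sketch of the same pattern in isolation; the function name is ours, not the script's:

    import json
    import os

    def atomic_write_json(path: str, data) -> None:
        """Write `data` as JSON so `path` is always old-or-new, never partial."""
        tmp = path + ".new"
        try:
            with open(tmp, "w", encoding="utf-8") as f:
                json.dump(data, f, sort_keys=True, indent=2)
            # rename() atomically replaces `path` on the same filesystem.
            os.rename(tmp, path)
        except BaseException:
            # Best-effort cleanup of the temporary file before re-raising.
            try:
                os.remove(tmp)
            except FileNotFoundError:
                pass
            raise

    atomic_write_json("state.json", {"abc123": []})
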
diff --git a/llvm_tools/nightly_revert_checker_test.py b/llvm_tools/nightly_revert_checker_test.py
index a8ab4195..86b7898a 100755
--- a/llvm_tools/nightly_revert_checker_test.py
+++ b/llvm_tools/nightly_revert_checker_test.py
@@ -1,12 +1,11 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
-# Copyright 2020 The Chromium OS Authors. All rights reserved.
+# Copyright 2020 The ChromiumOS Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Tests for nightly_revert_checker."""
-from __future__ import print_function
import io
import unittest
@@ -17,181 +16,207 @@ import get_upstream_patch
import nightly_revert_checker
import revert_checker
+
# pylint: disable=protected-access
class Test(unittest.TestCase):
- """Tests for nightly_revert_checker."""
-
- def test_email_rendering_works_for_singular_revert(self):
-
- def prettify_sha(sha: str) -> tiny_render.Piece:
- return 'pretty_' + sha
-
- def get_sha_description(sha: str) -> tiny_render.Piece:
- return 'subject_' + sha
-
- email = nightly_revert_checker._generate_revert_email(
- repository_name='${repo}',
- friendly_name='${name}',
- sha='${sha}',
- prettify_sha=prettify_sha,
- get_sha_description=get_sha_description,
- new_reverts=[
- revert_checker.Revert(
- sha='${revert_sha}', reverted_sha='${reverted_sha}')
- ])
-
- expected_email = nightly_revert_checker._Email(
- subject='[revert-checker/${repo}] new revert discovered across ${name}',
- body=[
- 'It looks like there may be a new revert across ${name} (',
- 'pretty_${sha}',
- ').',
- tiny_render.line_break,
- tiny_render.line_break,
- 'That is:',
- tiny_render.UnorderedList([[
- 'pretty_${revert_sha}',
- ' (appears to revert ',
- 'pretty_${reverted_sha}',
- '): ',
- 'subject_${revert_sha}',
- ]]),
- tiny_render.line_break,
- 'PTAL and consider reverting them locally.',
- ])
-
- self.assertEqual(email, expected_email)
-
- def test_email_rendering_works_for_multiple_reverts(self):
-
- def prettify_sha(sha: str) -> tiny_render.Piece:
- return 'pretty_' + sha
-
- def get_sha_description(sha: str) -> tiny_render.Piece:
- return 'subject_' + sha
-
- email = nightly_revert_checker._generate_revert_email(
- repository_name='${repo}',
- friendly_name='${name}',
- sha='${sha}',
- prettify_sha=prettify_sha,
- get_sha_description=get_sha_description,
- new_reverts=[
- revert_checker.Revert(
- sha='${revert_sha1}', reverted_sha='${reverted_sha1}'),
- revert_checker.Revert(
- sha='${revert_sha2}', reverted_sha='${reverted_sha2}'),
- # Keep this out-of-order to check that we sort based on SHAs
- revert_checker.Revert(
- sha='${revert_sha0}', reverted_sha='${reverted_sha0}'),
- ])
-
- expected_email = nightly_revert_checker._Email(
- subject='[revert-checker/${repo}] new reverts discovered across '
- '${name}',
- body=[
- 'It looks like there may be new reverts across ${name} (',
- 'pretty_${sha}',
- ').',
- tiny_render.line_break,
- tiny_render.line_break,
- 'These are:',
- tiny_render.UnorderedList([
- [
- 'pretty_${revert_sha0}',
- ' (appears to revert ',
- 'pretty_${reverted_sha0}',
- '): ',
- 'subject_${revert_sha0}',
- ],
- [
- 'pretty_${revert_sha1}',
- ' (appears to revert ',
- 'pretty_${reverted_sha1}',
- '): ',
- 'subject_${revert_sha1}',
- ],
- [
- 'pretty_${revert_sha2}',
- ' (appears to revert ',
- 'pretty_${reverted_sha2}',
- '): ',
- 'subject_${revert_sha2}',
- ],
- ]),
- tiny_render.line_break,
- 'PTAL and consider reverting them locally.',
- ])
-
- self.assertEqual(email, expected_email)
-
- def test_llvm_ebuild_parsing_appears_to_function(self):
- llvm_ebuild = io.StringIO('\n'.join((
- 'foo',
- '#LLVM_HASH="123"',
- 'LLVM_HASH="123" # comment',
- 'LLVM_NEXT_HASH="456"',
- )))
-
- shas = nightly_revert_checker._parse_llvm_ebuild_for_shas(llvm_ebuild)
- self.assertEqual(shas, [
- ('llvm', '123'),
- ('llvm-next', '456'),
- ])
-
- def test_llvm_ebuild_parsing_fails_if_both_hashes_arent_present(self):
- bad_bodies = [
- '',
- 'LLVM_HASH="123" # comment',
- 'LLVM_NEXT_HASH="123" # comment',
- 'LLVM_NEXT_HASH="123" # comment\n#LLVM_HASH="123"',
- ]
-
- for bad in bad_bodies:
- with self.assertRaises(ValueError) as e:
- nightly_revert_checker._parse_llvm_ebuild_for_shas(io.StringIO(bad))
-
- self.assertIn('Failed to detect SHAs', str(e.exception))
-
- @patch('revert_checker.find_reverts')
- @patch('get_upstream_patch.get_from_upstream')
- def test_do_cherrypick_is_called(self, do_cherrypick, find_reverts):
- find_reverts.return_value = [
- revert_checker.Revert('12345abcdef', 'fedcba54321')
- ]
- nightly_revert_checker.do_cherrypick(
- chroot_path='/path/to/chroot',
- llvm_dir='/path/to/llvm',
- interesting_shas=[('12345abcdef', 'fedcba54321')],
- state={},
- reviewers=['meow@chromium.org'],
- cc=['purr@chromium.org'])
-
- do_cherrypick.assert_called_once()
- find_reverts.assert_called_once()
-
- @patch('revert_checker.find_reverts')
- @patch('get_upstream_patch.get_from_upstream')
- def test_do_cherrypick_handles_cherrypick_error(self, do_cherrypick,
- find_reverts):
- find_reverts.return_value = [
- revert_checker.Revert('12345abcdef', 'fedcba54321')
- ]
- do_cherrypick.side_effect = get_upstream_patch.CherrypickError(
- 'Patch at 12345abcdef already exists in PATCHES.json')
- nightly_revert_checker.do_cherrypick(
- chroot_path='/path/to/chroot',
- llvm_dir='/path/to/llvm',
- interesting_shas=[('12345abcdef', 'fedcba54321')],
- state={},
- reviewers=['meow@chromium.org'],
- cc=['purr@chromium.org'])
-
- do_cherrypick.assert_called_once()
- find_reverts.assert_called_once()
-
-
-if __name__ == '__main__':
- unittest.main()
+ """Tests for nightly_revert_checker."""
+
+ def test_email_rendering_works_for_singular_revert(self):
+ def prettify_sha(sha: str) -> tiny_render.Piece:
+ return "pretty_" + sha
+
+ def get_sha_description(sha: str) -> tiny_render.Piece:
+ return "subject_" + sha
+
+ email = nightly_revert_checker._generate_revert_email(
+ repository_name="${repo}",
+ friendly_name="${name}",
+ sha="${sha}",
+ prettify_sha=prettify_sha,
+ get_sha_description=get_sha_description,
+ new_reverts=[
+ revert_checker.Revert(
+ sha="${revert_sha}", reverted_sha="${reverted_sha}"
+ )
+ ],
+ )
+
+ expected_email = nightly_revert_checker._Email(
+ subject="[revert-checker/${repo}] new revert discovered across ${name}",
+ body=[
+ "It looks like there may be a new revert across ${name} (",
+ "pretty_${sha}",
+ ").",
+ tiny_render.line_break,
+ tiny_render.line_break,
+ "That is:",
+ tiny_render.UnorderedList(
+ [
+ [
+ "pretty_${revert_sha}",
+ " (appears to revert ",
+ "pretty_${reverted_sha}",
+ "): ",
+ "subject_${revert_sha}",
+ ]
+ ]
+ ),
+ tiny_render.line_break,
+ "PTAL and consider reverting them locally.",
+ ],
+ )
+
+ self.assertEqual(email, expected_email)
+
+ def test_email_rendering_works_for_multiple_reverts(self):
+ def prettify_sha(sha: str) -> tiny_render.Piece:
+ return "pretty_" + sha
+
+ def get_sha_description(sha: str) -> tiny_render.Piece:
+ return "subject_" + sha
+
+ email = nightly_revert_checker._generate_revert_email(
+ repository_name="${repo}",
+ friendly_name="${name}",
+ sha="${sha}",
+ prettify_sha=prettify_sha,
+ get_sha_description=get_sha_description,
+ new_reverts=[
+ revert_checker.Revert(
+ sha="${revert_sha1}", reverted_sha="${reverted_sha1}"
+ ),
+ revert_checker.Revert(
+ sha="${revert_sha2}", reverted_sha="${reverted_sha2}"
+ ),
+ # Keep this out-of-order to check that we sort based on SHAs
+ revert_checker.Revert(
+ sha="${revert_sha0}", reverted_sha="${reverted_sha0}"
+ ),
+ ],
+ )
+
+ expected_email = nightly_revert_checker._Email(
+ subject="[revert-checker/${repo}] new reverts discovered across "
+ "${name}",
+ body=[
+ "It looks like there may be new reverts across ${name} (",
+ "pretty_${sha}",
+ ").",
+ tiny_render.line_break,
+ tiny_render.line_break,
+ "These are:",
+ tiny_render.UnorderedList(
+ [
+ [
+ "pretty_${revert_sha0}",
+ " (appears to revert ",
+ "pretty_${reverted_sha0}",
+ "): ",
+ "subject_${revert_sha0}",
+ ],
+ [
+ "pretty_${revert_sha1}",
+ " (appears to revert ",
+ "pretty_${reverted_sha1}",
+ "): ",
+ "subject_${revert_sha1}",
+ ],
+ [
+ "pretty_${revert_sha2}",
+ " (appears to revert ",
+ "pretty_${reverted_sha2}",
+ "): ",
+ "subject_${revert_sha2}",
+ ],
+ ]
+ ),
+ tiny_render.line_break,
+ "PTAL and consider reverting them locally.",
+ ],
+ )
+
+ self.assertEqual(email, expected_email)
+
+ def test_llvm_ebuild_parsing_appears_to_function(self):
+ llvm_ebuild = io.StringIO(
+ "\n".join(
+ (
+ "foo",
+ '#LLVM_HASH="123"',
+ 'LLVM_HASH="123" # comment',
+ 'LLVM_NEXT_HASH="456"',
+ )
+ )
+ )
+
+ shas = nightly_revert_checker._parse_llvm_ebuild_for_shas(llvm_ebuild)
+ self.assertEqual(
+ shas,
+ [
+ ("llvm", "123"),
+ ("llvm-next", "456"),
+ ],
+ )
+
+ def test_llvm_ebuild_parsing_fails_if_both_hashes_arent_present(self):
+ bad_bodies = [
+ "",
+ 'LLVM_HASH="123" # comment',
+ 'LLVM_NEXT_HASH="123" # comment',
+ 'LLVM_NEXT_HASH="123" # comment\n#LLVM_HASH="123"',
+ ]
+
+ for bad in bad_bodies:
+ with self.assertRaises(ValueError) as e:
+ nightly_revert_checker._parse_llvm_ebuild_for_shas(
+ io.StringIO(bad)
+ )
+
+ self.assertIn("Failed to detect SHAs", str(e.exception))
+
+ @patch("revert_checker.find_reverts")
+ @patch("get_upstream_patch.get_from_upstream")
+ def test_do_cherrypick_is_called(self, do_cherrypick, find_reverts):
+ find_reverts.return_value = [
+ revert_checker.Revert("12345abcdef", "fedcba54321")
+ ]
+ nightly_revert_checker.do_cherrypick(
+ chroot_path="/path/to/chroot",
+ llvm_dir="/path/to/llvm",
+ interesting_shas=[("12345abcdef", "fedcba54321")],
+ state={},
+ reviewers=["meow@chromium.org"],
+ cc=["purr@chromium.org"],
+ )
+
+ do_cherrypick.assert_called_once()
+ find_reverts.assert_called_once()
+
+ @patch("revert_checker.find_reverts")
+ @patch("get_upstream_patch.get_from_upstream")
+ def test_do_cherrypick_handles_cherrypick_error(
+ self, do_cherrypick, find_reverts
+ ):
+ find_reverts.return_value = [
+ revert_checker.Revert("12345abcdef", "fedcba54321")
+ ]
+ do_cherrypick.side_effect = get_upstream_patch.CherrypickError(
+ "Patch at 12345abcdef already exists in PATCHES.json"
+ )
+ nightly_revert_checker.do_cherrypick(
+ chroot_path="/path/to/chroot",
+ llvm_dir="/path/to/llvm",
+ interesting_shas=[("12345abcdef", "fedcba54321")],
+ state={},
+ reviewers=["meow@chromium.org"],
+ cc=["purr@chromium.org"],
+ )
+
+ do_cherrypick.assert_called_once()
+ find_reverts.assert_called_once()
+
+
+if __name__ == "__main__":
+ unittest.main()
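
To make the state handling exercised above concrete: the checker persists a map from each monitored SHA to the revert SHAs it has already reported, and `find_shas` yields only the difference against that set on each run. A tiny worked example with made-up SHAs:

    # Shape of the persisted state: {monitored_sha: [reported_revert_sha, ...]}.
    state = {"abc123": ["rev1"]}

    # Suppose revert_checker.find_reverts() now returns these revert SHAs.
    all_reverts = ["rev1", "rev2"]

    # Everything found is recorded for the next run...
    new_state = {"abc123": list(all_reverts)}

    # ...but only previously unreported reverts get emailed or cherry-picked.
    already_reported = set(state.get("abc123", []))
    new_reverts = [r for r in all_reverts if r not in already_reported]
    assert new_reverts == ["rev2"]
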
diff --git a/llvm_tools/patch_manager.py b/llvm_tools/patch_manager.py
index f2d6b322..4d4e8385 100755
--- a/llvm_tools/patch_manager.py
+++ b/llvm_tools/patch_manager.py
@@ -1,755 +1,305 @@
#!/usr/bin/env python3
-# -*- coding: utf-8 -*-
-# Copyright 2019 The Chromium OS Authors. All rights reserved.
+# Copyright 2019 The ChromiumOS Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""A manager for patches."""
-from __future__ import print_function
-
import argparse
-import json
+import enum
import os
-import subprocess
+from pathlib import Path
import sys
-from collections import namedtuple
+from typing import Iterable, List, Optional, Tuple
-import get_llvm_hash
from failure_modes import FailureModes
-from subprocess_helpers import check_call
+import get_llvm_hash
+import patch_utils
from subprocess_helpers import check_output
-def is_directory(dir_path):
- """Validates that the argument passed into 'argparse' is a directory."""
-
- if not os.path.isdir(dir_path):
- raise ValueError('Path is not a directory: %s' % dir_path)
-
- return dir_path
-
-
-def is_patch_metadata_file(patch_metadata_file):
- """Valides the argument into 'argparse' is a patch file."""
-
- if not os.path.isfile(patch_metadata_file):
- raise ValueError(
- 'Invalid patch metadata file provided: %s' % patch_metadata_file)
-
- if not patch_metadata_file.endswith('.json'):
- raise ValueError(
- 'Patch metadata file does not end in ".json": %s' % patch_metadata_file)
-
- return patch_metadata_file
-
-
-def is_valid_failure_mode(failure_mode):
- """Validates that the failure mode passed in is correct."""
-
- cur_failure_modes = [mode.value for mode in FailureModes]
-
- if failure_mode not in cur_failure_modes:
- raise ValueError('Invalid failure mode provided: %s' % failure_mode)
-
- return failure_mode
-
-
-def EnsureBisectModeAndSvnVersionAreSpecifiedTogether(failure_mode,
- good_svn_version):
- """Validates that 'good_svn_version' is passed in only for bisection."""
-
- if failure_mode != FailureModes.BISECT_PATCHES.value and good_svn_version:
- raise ValueError('"good_svn_version" is only available for bisection.')
- elif failure_mode == FailureModes.BISECT_PATCHES.value and \
- not good_svn_version:
- raise ValueError('A good SVN version is required for bisection (used by'
- '"git bisect start".')
-
-
-def GetCommandLineArgs():
- """Get the required arguments from the command line."""
-
- # Create parser and add optional command-line arguments.
- parser = argparse.ArgumentParser(description='A manager for patches.')
-
- # Add argument for the last good SVN version which is required by
- # `git bisect start` (only valid for bisection mode).
- parser.add_argument(
- '--good_svn_version',
- type=int,
- help='INTERNAL USE ONLY... (used for bisection.)')
-
- # Add argument for the number of patches it iterate. Only used when performing
- # `git bisect run`.
- parser.add_argument(
- '--num_patches_to_iterate', type=int, help=argparse.SUPPRESS)
-
- # Add argument for whether bisection should continue. Only used for
- # 'bisect_patches.'
- parser.add_argument(
- '--continue_bisection',
- type=bool,
- default=False,
- help='Determines whether bisection should continue after successfully '
- 'bisecting a patch (default: %(default)s) - only used for '
- '"bisect_patches"')
-
- # Trust src_path HEAD and svn_version.
- parser.add_argument(
- '--use_src_head',
- action='store_true',
- help='Use the HEAD of src_path directory as is, not necessarily the same '
- 'as the svn_version of upstream.')
-
- # Add argument for the LLVM version to use for patch management.
- parser.add_argument(
- '--svn_version',
- type=int,
- required=True,
- help='the LLVM svn version to use for patch management (determines '
- 'whether a patch is applicable)')
-
- # Add argument for the patch metadata file that is in $FILESDIR.
- parser.add_argument(
- '--patch_metadata_file',
- required=True,
- type=is_patch_metadata_file,
- help='the absolute path to the .json file in "$FILESDIR/" of the '
- 'package which has all the patches and their metadata if applicable')
-
- # Add argument for the absolute path to the ebuild's $FILESDIR path.
- # Example: '.../sys-devel/llvm/files/'.
- parser.add_argument(
- '--filesdir_path',
- required=True,
- type=is_directory,
- help='the absolute path to the ebuild "files/" directory')
-
- # Add argument for the absolute path to the unpacked sources.
- parser.add_argument(
- '--src_path',
- required=True,
- type=is_directory,
- help='the absolute path to the unpacked LLVM sources')
-
- # Add argument for the mode of the patch manager when handling failing
- # applicable patches.
- parser.add_argument(
- '--failure_mode',
- default=FailureModes.FAIL.value,
- type=is_valid_failure_mode,
- help='the mode of the patch manager when handling failed patches ' \
- '(default: %(default)s)')
-
- # Parse the command line.
- args_output = parser.parse_args()
-
- EnsureBisectModeAndSvnVersionAreSpecifiedTogether(
- args_output.failure_mode, args_output.good_svn_version)
-
- return args_output
+class GitBisectionCode(enum.IntEnum):
+ """Git bisection exit codes.
+
+    Used when patch_manager.py runs in bisection mode; the exit code
+    tells `git bisect run` how to treat the current revision when
+    patches fail to apply.
+ """
+
+ GOOD = 0
+ """All patches applied successfully."""
+ BAD = 1
+ """The tested patch failed to apply."""
+ SKIP = 125
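+    """The patch could not be tested, e.g. a prior patch failed to apply."""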
+
+
+def GetCommandLineArgs(sys_argv: Optional[List[str]]):
+ """Get the required arguments from the command line."""
+
+ # Create parser and add optional command-line arguments.
+ parser = argparse.ArgumentParser(description="A manager for patches.")
+
+ # Add argument for the LLVM version to use for patch management.
+ parser.add_argument(
+ "--svn_version",
+ type=int,
+ help="the LLVM svn version to use for patch management (determines "
+ "whether a patch is applicable). Required when not bisecting.",
+ )
+
+ # Add argument for the patch metadata file that is in $FILESDIR.
+ parser.add_argument(
+ "--patch_metadata_file",
+ required=True,
+ type=Path,
+ help='the absolute path to the .json file in "$FILESDIR/" of the '
+ "package which has all the patches and their metadata if applicable",
+ )
+
+ # Add argument for the absolute path to the unpacked sources.
+ parser.add_argument(
+ "--src_path",
+ required=True,
+ type=Path,
+ help="the absolute path to the unpacked LLVM sources",
+ )
+
+ # Add argument for the mode of the patch manager when handling failing
+ # applicable patches.
+ parser.add_argument(
+ "--failure_mode",
+ default=FailureModes.FAIL,
+ type=FailureModes,
+ help="the mode of the patch manager when handling failed patches "
+ "(default: %(default)s)",
+ )
+ parser.add_argument(
+ "--test_patch",
+ default="",
+ help="The rel_patch_path of the patch we want to bisect the "
+ "application of. Not used in other modes.",
+ )
+
+ # Parse the command line.
+ return parser.parse_args(sys_argv)
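
Since `--failure_mode` now uses `type=FailureModes`, argparse coerces the raw
string straight into the enum. A hypothetical parse for illustration, with
placeholder paths and revision, assuming the enum values are the lowercase
mode names used on the command line:

```python
# Hypothetical invocation; all paths and the revision are placeholders.
args = GetCommandLineArgs(
    [
        "--svn_version", "123456",
        "--patch_metadata_file", "/path/to/files/PATCHES.json",
        "--src_path", "/path/to/llvm-project",
        "--failure_mode", "continue",
    ]
)
assert args.failure_mode is FailureModes.CONTINUE
```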
def GetHEADSVNVersion(src_path):
- """Gets the SVN version of HEAD in the src tree."""
-
- cmd = ['git', '-C', src_path, 'rev-parse', 'HEAD']
-
- git_hash = check_output(cmd)
-
- version = get_llvm_hash.GetVersionFrom(src_path, git_hash.rstrip())
-
- return version
-
-
-def VerifyHEADIsTheSameAsSVNVersion(src_path, svn_version):
- """Verifies that HEAD's SVN version matches 'svn_version'."""
-
- head_svn_version = GetHEADSVNVersion(src_path)
-
- if head_svn_version != svn_version:
- raise ValueError('HEAD\'s SVN version %d does not match "svn_version"'
- ' %d, please move HEAD to "svn_version"s\' git hash.' %
- (head_svn_version, svn_version))
-
-
-def GetPathToPatch(filesdir_path, rel_patch_path):
- """Gets the absolute path to a patch in $FILESDIR.
-
- Args:
- filesdir_path: The absolute path to $FILESDIR.
- rel_patch_path: The relative path to the patch in '$FILESDIR/'.
-
- Returns:
- The absolute path to the patch in $FILESDIR.
-
- Raises:
- ValueError: Unable to find the path to the patch in $FILESDIR.
- """
-
- if not os.path.isdir(filesdir_path):
- raise ValueError('Invalid path to $FILESDIR provided: %s' % filesdir_path)
-
- # Combine $FILESDIR + relative path of patch to $FILESDIR.
- patch_path = os.path.join(filesdir_path, rel_patch_path)
-
- if not os.path.isfile(patch_path):
- raise ValueError('The absolute path %s to the patch %s does not exist' %
- (patch_path, rel_patch_path))
-
- return patch_path
-
-
-def GetPatchMetadata(patch_dict):
- """Gets the patch's metadata.
-
- Args:
- patch_dict: A dictionary that has the patch metadata.
-
- Returns:
- A tuple that contains the metadata values.
- """
-
- # Get the metadata values of a patch if possible.
- # FIXME(b/221489531): Remove start_version & end_version
- if 'version_range' in patch_dict:
- start_version = patch_dict['version_range'].get('from', 0)
- end_version = patch_dict['version_range'].get('until', None)
- else:
- start_version = patch_dict.get('start_version', 0)
- end_version = patch_dict.get('end_version', None)
- is_critical = patch_dict.get('is_critical', False)
-
- return start_version, end_version, is_critical
-
+ """Gets the SVN version of HEAD in the src tree."""
-def ApplyPatch(src_path, patch_path):
- """Attempts to apply the patch.
+ cmd = ["git", "-C", src_path, "rev-parse", "HEAD"]
- Args:
- src_path: The absolute path to the unpacked sources of the package.
- patch_path: The absolute path to the patch in $FILESDIR/
+ git_hash = check_output(cmd)
- Returns:
- A boolean where 'True' means that the patch applied fine or 'False' means
- that the patch failed to apply.
- """
+ version = get_llvm_hash.GetVersionFrom(src_path, git_hash.rstrip())
- if not os.path.isdir(src_path):
- raise ValueError('Invalid src path provided: %s' % src_path)
-
- if not os.path.isfile(patch_path):
- raise ValueError('Invalid patch file provided: %s' % patch_path)
-
- # Test the patch with '--dry-run' before actually applying the patch.
- test_patch_cmd = [
- 'patch', '--dry-run', '-d', src_path, '-f', '-p1', '-E',
- '--no-backup-if-mismatch', '-i', patch_path
- ]
-
- # Cmd to apply a patch in the src unpack path.
- apply_patch_cmd = [
- 'patch', '-d', src_path, '-f', '-p1', '-E', '--no-backup-if-mismatch',
- '-i', patch_path
- ]
-
- try:
- check_output(test_patch_cmd)
-
- # If the mode is 'continue', then catching the exception makes sure that
- # the program does not exit on the first failed applicable patch.
- except subprocess.CalledProcessError:
- # Test run on the patch failed to apply.
- return False
-
- # Test run succeeded on the patch.
- check_output(apply_patch_cmd)
-
- return True
-
-
-def UpdatePatchMetadataFile(patch_metadata_file, patches):
- """Updates the .json file with unchanged and at least one changed patch.
-
- Args:
- patch_metadata_file: The absolute path to the .json file that has all the
- patches and its metadata.
- patches: A list of patches whose metadata were or were not updated.
-
- Raises:
- ValueError: The patch metadata file does not have the correct extension.
- """
-
- if not patch_metadata_file.endswith('.json'):
- raise ValueError('File does not end in ".json": %s' % patch_metadata_file)
-
- with open(patch_metadata_file, 'w') as patch_file:
- json.dump(patches, patch_file, indent=4, separators=(',', ': '))
+ return version
def GetCommitHashesForBisection(src_path, good_svn_version, bad_svn_version):
- """Gets the good and bad commit hashes required by `git bisect start`."""
-
- bad_commit_hash = get_llvm_hash.GetGitHashFrom(src_path, bad_svn_version)
-
- good_commit_hash = get_llvm_hash.GetGitHashFrom(src_path, good_svn_version)
-
- return good_commit_hash, bad_commit_hash
-
-
-def PerformBisection(src_path, good_commit, bad_commit, svn_version,
- patch_metadata_file, filesdir_path, num_patches):
- """Performs bisection to determine where a patch stops applying."""
-
- start_cmd = [
- 'git', '-C', src_path, 'bisect', 'start', bad_commit, good_commit
- ]
-
- check_output(start_cmd)
-
- run_cmd = [
- 'git', '-C', src_path, 'bisect', 'run',
- os.path.abspath(__file__), '--svn_version',
- '%d' % svn_version, '--patch_metadata_file', patch_metadata_file,
- '--filesdir_path', filesdir_path, '--src_path', src_path,
- '--failure_mode', 'internal_bisection', '--num_patches_to_iterate',
- '%d' % num_patches
- ]
-
- check_call(run_cmd)
-
- # Successfully bisected the patch, so retrieve the SVN version from the
- # commit message.
- get_bad_commit_hash_cmd = [
- 'git', '-C', src_path, 'rev-parse', 'refs/bisect/bad'
- ]
-
- git_hash = check_output(get_bad_commit_hash_cmd)
-
- end_cmd = ['git', '-C', src_path, 'bisect', 'reset']
-
- check_output(end_cmd)
-
- # `git bisect run` returns the bad commit hash and the commit message.
- version = get_llvm_hash.GetVersionFrom(src_path, git_hash.rstrip())
-
- return version
-
-
-def CleanSrcTree(src_path):
- """Cleans the source tree of the changes made in 'src_path'."""
-
- reset_src_tree_cmd = ['git', '-C', src_path, 'reset', 'HEAD', '--hard']
-
- check_output(reset_src_tree_cmd)
-
- clean_src_tree_cmd = ['git', '-C', src_path, 'clean', '-fd']
-
- check_output(clean_src_tree_cmd)
-
-
-def SaveSrcTreeState(src_path):
- """Stashes the changes made so far to the source tree."""
-
- save_src_tree_cmd = ['git', '-C', src_path, 'stash', '-a']
-
- check_output(save_src_tree_cmd)
-
-
-def RestoreSrcTreeState(src_path, bad_commit_hash):
- """Restores the changes made to the source tree."""
-
- checkout_cmd = ['git', '-C', src_path, 'checkout', bad_commit_hash]
-
- check_output(checkout_cmd)
-
- get_changes_cmd = ['git', '-C', src_path, 'stash', 'pop']
-
- check_output(get_changes_cmd)
-
-
-def HandlePatches(svn_version,
- patch_metadata_file,
- filesdir_path,
- src_path,
- mode,
- good_svn_version=None,
- num_patches_to_iterate=None,
- continue_bisection=False):
- """Handles the patches in the .json file for the package.
-
- Args:
- svn_version: The LLVM version to use for patch management.
- patch_metadata_file: The absolute path to the .json file in '$FILESDIR/'
- that has all the patches and their metadata.
- filesdir_path: The absolute path to $FILESDIR.
- src_path: The absolute path to the unpacked destination of the package.
- mode: The action to take when an applicable patch failed to apply.
- Ex: 'FailureModes.FAIL'
- good_svn_version: Only used by 'bisect_patches' which tells
- `git bisect start` the good version.
- num_patches_to_iterate: The number of patches to iterate in the .JSON file
- (internal use). Only used by `git bisect run`.
- continue_bisection: Only used for 'bisect_patches' mode. If flag is set,
- then bisection will continue to the next patch when successfully bisected a
- patch.
-
- Returns:
- Depending on the mode, 'None' would be returned if everything went well or
- the .json file was not updated. Otherwise, a list or multiple lists would
- be returned that indicates what has changed.
-
- Raises:
- ValueError: The patch metadata file does not exist or does not end with
- '.json' or the absolute path to $FILESDIR does not exist or the unpacked
- path does not exist or if the mode is 'fail', then an applicable patch
- failed to apply.
- """
-
- # A flag for whether the mode specified would possible modify the patches.
- can_modify_patches = False
-
- # 'fail' or 'continue' mode would not modify a patch's metadata, so the .json
- # file would stay the same.
- if mode != FailureModes.FAIL and mode != FailureModes.CONTINUE:
- can_modify_patches = True
-
- # A flag that determines whether at least one patch's metadata was
- # updated due to the mode that is passed in.
- updated_patch = False
-
- # A list of patches that will be in the updated .json file.
- applicable_patches = []
-
- # A list of patches that successfully applied.
- applied_patches = []
-
- # A list of patches that were disabled.
- disabled_patches = []
-
- # A list of bisected patches.
- bisected_patches = []
-
- # A list of non applicable patches.
- non_applicable_patches = []
-
- # A list of patches that will not be included in the updated .json file
- removed_patches = []
-
- # Whether the patch metadata file was modified where 'None' means that the
- # patch metadata file was not modified otherwise the absolute path to the
- # patch metadata file is stored.
- modified_metadata = None
-
- # A list of patches that failed to apply.
- failed_patches = []
-
- with open(patch_metadata_file) as patch_file:
- patch_file_contents = json.load(patch_file)
-
- if mode == FailureModes.BISECT_PATCHES:
- # A good and bad commit are required by `git bisect start`.
- good_commit, bad_commit = GetCommitHashesForBisection(
- src_path, good_svn_version, svn_version)
-
- # Patch format:
- # {
- # "rel_patch_path" : "[REL_PATCH_PATH_FROM_$FILESDIR]"
- # [PATCH_METADATA] if available.
- # }
- #
- # For each patch, find the path to it in $FILESDIR and get its metadata if
- # available, then check if the patch is applicable.
- for patch_dict_index, cur_patch_dict in enumerate(patch_file_contents):
- # Used by the internal bisection. All the patches in the interval [0, N]
- # have been iterated.
- if num_patches_to_iterate and \
- (patch_dict_index + 1) > num_patches_to_iterate:
- break
-
- # Get the absolute path to the patch in $FILESDIR.
- path_to_patch = GetPathToPatch(filesdir_path,
- cur_patch_dict['rel_patch_path'])
-
- # Get the patch's metadata.
- #
- # Index information of 'patch_metadata':
- # [0]: start_version
- # [1]: end_version
- # [2]: is_critical
- patch_metadata = GetPatchMetadata(cur_patch_dict)
-
- if not patch_metadata[1]:
- # Patch does not have an 'end_version' value which implies 'end_version'
- # == 'inf' ('svn_version' will always be less than 'end_version'), so
- # the patch is applicable if 'svn_version' >= 'start_version'.
- patch_applicable = svn_version >= patch_metadata[0]
- else:
- # Patch is applicable if 'svn_version' >= 'start_version' &&
- # "svn_version" < "end_version".
- patch_applicable = (svn_version >= patch_metadata[0] and \
- svn_version < patch_metadata[1])
-
- if can_modify_patches:
- # Add to the list only if the mode can potentially modify a patch.
- #
- # If the mode is 'remove_patches', then all patches that are
- # applicable or are from the future will be added to the updated .json
- # file and all patches that are not applicable will be added to the
- # remove patches list which will not be included in the updated .json
- # file.
- if patch_applicable or svn_version < patch_metadata[0] or \
- mode != FailureModes.REMOVE_PATCHES:
- applicable_patches.append(cur_patch_dict)
- elif mode == FailureModes.REMOVE_PATCHES:
- removed_patches.append(path_to_patch)
-
- if not modified_metadata:
- # At least one patch will be removed from the .json file.
- modified_metadata = patch_metadata_file
-
- if not patch_applicable:
- non_applicable_patches.append(os.path.basename(path_to_patch))
-
- # There is no need to apply patches in 'remove_patches' mode because the
- # mode removes patches that do not apply anymore based off of
- # 'svn_version.'
- if patch_applicable and mode != FailureModes.REMOVE_PATCHES:
- patch_applied = ApplyPatch(src_path, path_to_patch)
-
- if not patch_applied: # Failed to apply patch.
- failed_patches.append(os.path.basename(path_to_patch))
-
- # Check the mode to determine what action to take on the failing
- # patch.
- if mode == FailureModes.DISABLE_PATCHES:
- # Set the patch's 'end_version' to 'svn_version' so the patch
- # would not be applicable anymore (i.e. the patch's 'end_version'
- # would not be greater than 'svn_version').
-
- # Last element in 'applicable_patches' is the current patch.
- applicable_patches[-1]['end_version'] = svn_version
-
- disabled_patches.append(os.path.basename(path_to_patch))
-
- if not updated_patch:
- # At least one patch has been modified, so the .json file
- # will be updated with the new patch metadata.
- updated_patch = True
-
- modified_metadata = patch_metadata_file
- elif mode == FailureModes.BISECT_PATCHES:
- # Figure out where the patch's stops applying and set the patch's
- # 'end_version' to that version.
-
- # Do not want to overwrite the changes to the current progress of
- # 'bisect_patches' on the source tree.
- SaveSrcTreeState(src_path)
-
- # Need a clean source tree for `git bisect run` to avoid unnecessary
- # fails for patches.
- CleanSrcTree(src_path)
-
- print('\nStarting to bisect patch %s for SVN version %d:\n' %
- (os.path.basename(cur_patch_dict['rel_patch_path']),
- svn_version))
-
- # Performs the bisection: calls `git bisect start` and
- # `git bisect run`, where `git bisect run` is going to call this
- # script as many times as needed with specific arguments.
- bad_svn_version = PerformBisection(
- src_path, good_commit, bad_commit, svn_version,
- patch_metadata_file, filesdir_path, patch_dict_index + 1)
-
- print('\nSuccessfully bisected patch %s, starts to fail to apply '
- 'at %d\n' % (os.path.basename(
- cur_patch_dict['rel_patch_path']), bad_svn_version))
-
- # Overwrite the .JSON file with the new 'end_version' for the
- # current failed patch so that if there are other patches that
- # fail to apply, then the 'end_version' for the current patch could
- # be applicable when `git bisect run` is performed on the next
- # failed patch because the same .JSON file is used for `git bisect
- # run`.
- patch_file_contents[patch_dict_index][
- 'end_version'] = bad_svn_version
- UpdatePatchMetadataFile(patch_metadata_file, patch_file_contents)
-
- # Clear the changes made to the source tree by `git bisect run`.
- CleanSrcTree(src_path)
-
- if not continue_bisection:
- # Exiting program early because 'continue_bisection' is not set.
- sys.exit(0)
-
- bisected_patches.append(
- '%s starts to fail to apply at %d' % (os.path.basename(
- cur_patch_dict['rel_patch_path']), bad_svn_version))
-
- # Continue where 'bisect_patches' left off.
- RestoreSrcTreeState(src_path, bad_commit)
-
- if not modified_metadata:
- # At least one patch's 'end_version' has been updated.
- modified_metadata = patch_metadata_file
-
- elif mode == FailureModes.FAIL:
- if applied_patches:
- print('The following patches applied successfully up to the '
- 'failed patch:')
- print('\n'.join(applied_patches))
-
- # Throw an exception on the first patch that failed to apply.
+ """Gets the good and bad commit hashes required by `git bisect start`."""
+
+ bad_commit_hash = get_llvm_hash.GetGitHashFrom(src_path, bad_svn_version)
+
+ good_commit_hash = get_llvm_hash.GetGitHashFrom(src_path, good_svn_version)
+
+ return good_commit_hash, bad_commit_hash
+
+
+def CheckPatchApplies(
+ svn_version: int,
+ llvm_src_dir: Path,
+ patches_json_fp: Path,
+ rel_patch_path: str,
+) -> GitBisectionCode:
+ """Check that a given patch with the rel_patch_path applies in the stack.
+
+    This is used in the bisection mode of the patch manager. It's similar
+    to patch_utils.apply_all_from_json, but differs in that the patch with
+    rel_patch_path will attempt to apply regardless of its version range,
+    as we're trying to identify the SVN version at which it stops applying.
+
+ Args:
+ svn_version: SVN version to test at.
+        llvm_src_dir: llvm-project source code directory (with a .git).
+ patches_json_fp: PATCHES.json filepath.
+ rel_patch_path: Relative patch path of the patch we want to check. If
+ patches before this patch fail to apply, then the revision is skipped.
+ """
+ with patches_json_fp.open(encoding="utf-8") as f:
+ patch_entries = patch_utils.json_to_patch_entries(
+ patches_json_fp.parent,
+ f,
+ )
+ with patch_utils.git_clean_context(llvm_src_dir):
+ success, _, failed_patches = ApplyPatchAndPrior(
+ svn_version,
+ llvm_src_dir,
+ patch_entries,
+ rel_patch_path,
+ )
+ if success:
+ # Everything is good, patch applied successfully.
+ print(f"SUCCEEDED applying {rel_patch_path} @ r{svn_version}")
+ return GitBisectionCode.GOOD
+ if failed_patches and failed_patches[-1].rel_patch_path == rel_patch_path:
+ # We attempted to apply this patch, but it failed.
+ print(f"FAILED to apply {rel_patch_path} @ r{svn_version}")
+ return GitBisectionCode.BAD
+    # A prior patch in the stack failed before we reached the patch of
+    # interest, so it was never attempted. Skip this revision.
+ print(f"SKIPPED {rel_patch_path} @ r{svn_version} due to prior failures")
+ return GitBisectionCode.SKIP
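
These exit codes are consumed directly by `git bisect run` (0 is good, 125 is
skip, other values up to 127 are bad). Echoing the removed PerformBisection
helper, a bisection session might be driven as in the sketch below; the commit
hashes and paths are placeholders, and the exact `bisect_patches` flag
spelling is assumed from the FailureModes enum.

```python
import subprocess

# Placeholders; substitute a real checkout and real commit hashes.
src = "/path/to/llvm-project"
good_commit = "<good sha>"
bad_commit = "<bad sha>"

subprocess.check_call(
    ["git", "-C", src, "bisect", "start", bad_commit, good_commit]
)
# git bisect moves HEAD and re-runs the script; in bisection mode the script
# derives the SVN version from HEAD itself, so --svn_version is not passed.
subprocess.check_call(
    [
        "git", "-C", src, "bisect", "run",
        "./patch_manager.py",
        "--src_path", src,
        "--patch_metadata_file", "/path/to/files/PATCHES.json",
        "--failure_mode", "bisect_patches",
        "--test_patch", "cherry/some-fix.patch",
    ]
)
subprocess.check_call(["git", "-C", src, "bisect", "reset"])
```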
+
+
+def ApplyPatchAndPrior(
+ svn_version: int,
+ src_dir: Path,
+ patch_entries: Iterable[patch_utils.PatchEntry],
+ rel_patch_path: str,
+) -> Tuple[bool, List[patch_utils.PatchEntry], List[patch_utils.PatchEntry]]:
+ """Apply a patch, and all patches that apply before it in the patch stack.
+
+    Patches that were never attempted (because their version range didn't
+    match and they weren't the patch of interest) do not appear in the output.
+
+ Probably shouldn't be called from outside of CheckPatchApplies, as it modifies
+ the source dir contents.
+
+ Returns:
+ A tuple where:
+ [0]: Did the patch of interest succeed in applying?
+ [1]: List of applied patches, potentially containing the patch of interest.
+ [2]: List of failing patches, potentially containing the patch of interest.
+ """
+ failed_patches = []
+ applied_patches = []
+ # We have to apply every patch up to the one we care about,
+ # as patches can stack.
+ for pe in patch_entries:
+ is_patch_of_interest = pe.rel_patch_path == rel_patch_path
+ applied, failed_hunks = patch_utils.apply_single_patch_entry(
+ svn_version, src_dir, pe, ignore_version_range=is_patch_of_interest
+ )
+ meant_to_apply = bool(failed_hunks) or is_patch_of_interest
+ if is_patch_of_interest:
+ if applied:
+ # We applied the patch we wanted to, we can stop.
+ applied_patches.append(pe)
+ return True, applied_patches, failed_patches
+ else:
+ # We failed the patch we cared about, we can stop.
+ failed_patches.append(pe)
+ return False, applied_patches, failed_patches
+ else:
+ if applied:
+ applied_patches.append(pe)
+ elif meant_to_apply:
+ # Broke before we reached the patch we cared about. Stop.
+ failed_patches.append(pe)
+ return False, applied_patches, failed_patches
+    raise ValueError(f"Did not find patch {rel_patch_path}. Does it exist?")
+
+
+def PrintPatchResults(patch_info: patch_utils.PatchInfo):
+ """Prints the results of handling the patches of a package.
+
+ Args:
+ patch_info: A dataclass that has information on the patches.
+ """
+
+ def _fmt(patches):
+ return (str(pe.patch_path()) for pe in patches)
+
+ if patch_info.applied_patches:
+ print("\nThe following patches applied successfully:")
+ print("\n".join(_fmt(patch_info.applied_patches)))
+
+ if patch_info.failed_patches:
+ print("\nThe following patches failed to apply:")
+ print("\n".join(_fmt(patch_info.failed_patches)))
+
+ if patch_info.non_applicable_patches:
+ print("\nThe following patches were not applicable:")
+ print("\n".join(_fmt(patch_info.non_applicable_patches)))
+
+ if patch_info.modified_metadata:
+ print(
+ "\nThe patch metadata file %s has been modified"
+ % os.path.basename(patch_info.modified_metadata)
+ )
+
+ if patch_info.disabled_patches:
+ print("\nThe following patches were disabled:")
+ print("\n".join(_fmt(patch_info.disabled_patches)))
+
+ if patch_info.removed_patches:
+ print(
+ "\nThe following patches were removed from the patch metadata file:"
+ )
+ for cur_patch_path in patch_info.removed_patches:
+ print("%s" % os.path.basename(cur_patch_path))
+
+
+def main(sys_argv: List[str]):
+ """Applies patches to the source tree and takes action on a failed patch."""
+
+ args_output = GetCommandLineArgs(sys_argv)
+
+ llvm_src_dir = Path(args_output.src_path)
+ if not llvm_src_dir.is_dir():
+ raise ValueError(f"--src_path arg {llvm_src_dir} is not a directory")
+ patches_json_fp = Path(args_output.patch_metadata_file)
+ if not patches_json_fp.is_file():
+ raise ValueError(
+            f"--patch_metadata_file arg {patches_json_fp} is not a file"
+ )
+
+ def _apply_all(args):
+ if args.svn_version is None:
+ raise ValueError("--svn_version must be set when applying patches")
+ result = patch_utils.apply_all_from_json(
+ svn_version=args.svn_version,
+ llvm_src_dir=llvm_src_dir,
+ patches_json_fp=patches_json_fp,
+ continue_on_failure=args.failure_mode == FailureModes.CONTINUE,
+ )
+ PrintPatchResults(result)
+
+ def _remove(args):
+ patch_utils.remove_old_patches(
+ args.svn_version, llvm_src_dir, patches_json_fp
+ )
+
+ def _disable(args):
+ patch_utils.update_version_ranges(
+ args.svn_version, llvm_src_dir, patches_json_fp
+ )
+
+ def _test_single(args):
+ if not args.test_patch:
raise ValueError(
- 'Failed to apply patch: %s' % os.path.basename(path_to_patch))
- elif mode == FailureModes.INTERNAL_BISECTION:
- # Determine the exit status for `git bisect run` because of the
- # failed patch in the interval [0, N].
- #
- # NOTE: `git bisect run` exit codes are as follows:
- # 130: Terminates the bisection.
- # 1: Similar as `git bisect bad`.
-
- # Some patch in the interval [0, N) failed, so terminate bisection
- # (the patch stack is broken).
- if (patch_dict_index + 1) != num_patches_to_iterate:
- print('\nTerminating bisection due to patch %s failed to apply '
- 'on SVN version %d.\n' % (os.path.basename(
- cur_patch_dict['rel_patch_path']), svn_version))
-
- # Man page for `git bisect run` states that any value over 127
- # terminates it.
- sys.exit(130)
-
- # Changes to the source tree need to be removed, otherwise some
- # patches may fail when applying the patch to the source tree when
- # `git bisect run` calls this script again.
- CleanSrcTree(src_path)
-
- # The last patch in the interval [0, N] failed to apply, so let
- # `git bisect run` know that the last patch (the patch that failed
- # originally which led to `git bisect run` to be invoked) is bad
- # with exit code 1.
- sys.exit(1)
- else: # Successfully applied patch
- applied_patches.append(os.path.basename(path_to_patch))
-
- # All patches in the interval [0, N] applied successfully, so let
- # `git bisect run` know that the program exited with exit code 0 (good).
- if mode == FailureModes.INTERNAL_BISECTION:
- # Changes to the source tree need to be removed, otherwise some
- # patches may fail when applying the patch to the source tree when
- # `git bisect run` calls this script again.
- #
- # Also, if `git bisect run` will NOT call this script again (terminated) and
- # if the source tree changes are not removed, `git bisect reset` will
- # complain that the changes would need to be 'stashed' or 'removed' in
- # order to reset HEAD back to the bad commit's git hash, so HEAD will remain
- # on the last git hash used by `git bisect run`.
- CleanSrcTree(src_path)
-
- # NOTE: Exit code 0 is similar to `git bisect good`.
- sys.exit(0)
-
- # Create a namedtuple of the patch results.
- PatchInfo = namedtuple('PatchInfo', [
- 'applied_patches', 'failed_patches', 'non_applicable_patches',
- 'disabled_patches', 'removed_patches', 'modified_metadata'
- ])
-
- patch_info = PatchInfo(
- applied_patches=applied_patches,
- failed_patches=failed_patches,
- non_applicable_patches=non_applicable_patches,
- disabled_patches=disabled_patches,
- removed_patches=removed_patches,
- modified_metadata=modified_metadata)
-
- # Determine post actions after iterating through the patches.
- if mode == FailureModes.REMOVE_PATCHES:
- if removed_patches:
- UpdatePatchMetadataFile(patch_metadata_file, applicable_patches)
- elif mode == FailureModes.DISABLE_PATCHES:
- if updated_patch:
- UpdatePatchMetadataFile(patch_metadata_file, applicable_patches)
- elif mode == FailureModes.BISECT_PATCHES:
- PrintPatchResults(patch_info)
- if modified_metadata:
- print('\nThe following patches have been bisected:')
- print('\n'.join(bisected_patches))
-
- # Exiting early because 'bisect_patches' will not be called from other
- # scripts, only this script uses 'bisect_patches'. The intent is to provide
- # bisection information on the patches and aid in the bisection process.
- sys.exit(0)
-
- return patch_info
-
-
-def PrintPatchResults(patch_info):
- """Prints the results of handling the patches of a package.
-
- Args:
- patch_info: A namedtuple that has information on the patches.
- """
-
- if patch_info.applied_patches:
- print('\nThe following patches applied successfully:')
- print('\n'.join(patch_info.applied_patches))
-
- if patch_info.failed_patches:
- print('\nThe following patches failed to apply:')
- print('\n'.join(patch_info.failed_patches))
-
- if patch_info.non_applicable_patches:
- print('\nThe following patches were not applicable:')
- print('\n'.join(patch_info.non_applicable_patches))
-
- if patch_info.modified_metadata:
- print('\nThe patch metadata file %s has been modified' % os.path.basename(
- patch_info.modified_metadata))
-
- if patch_info.disabled_patches:
- print('\nThe following patches were disabled:')
- print('\n'.join(patch_info.disabled_patches))
-
- if patch_info.removed_patches:
- print('\nThe following patches were removed from the patch metadata file:')
- for cur_patch_path in patch_info.removed_patches:
- print('%s' % os.path.basename(cur_patch_path))
-
-
-def main():
- """Applies patches to the source tree and takes action on a failed patch."""
-
- args_output = GetCommandLineArgs()
-
- if args_output.failure_mode != FailureModes.INTERNAL_BISECTION.value:
- # If the SVN version of HEAD is not the same as 'svn_version', then some
- # patches that fail to apply could successfully apply if HEAD's SVN version
- # was the same as 'svn_version'. In other words, HEAD's git hash should be
- # what is being updated to (e.g. LLVM_NEXT_HASH).
- if not args_output.use_src_head:
- VerifyHEADIsTheSameAsSVNVersion(args_output.src_path,
- args_output.svn_version)
- else:
- # `git bisect run` called this script.
- #
- # `git bisect run` moves HEAD each time it invokes this script, so set the
- # 'svn_version' to be current HEAD's SVN version so that the previous
- # SVN version is not used in determining whether a patch is applicable.
- args_output.svn_version = GetHEADSVNVersion(args_output.src_path)
-
- # Get the results of handling the patches of the package.
- patch_info = HandlePatches(
- args_output.svn_version, args_output.patch_metadata_file,
- args_output.filesdir_path, args_output.src_path,
- FailureModes(args_output.failure_mode), args_output.good_svn_version,
- args_output.num_patches_to_iterate, args_output.continue_bisection)
-
- PrintPatchResults(patch_info)
-
-
-if __name__ == '__main__':
- main()
+                "Running with bisect_patches requires the --test_patch flag."
+ )
+ svn_version = GetHEADSVNVersion(llvm_src_dir)
+ error_code = CheckPatchApplies(
+ svn_version, llvm_src_dir, patches_json_fp, args.test_patch
+ )
+ # Since this is for bisection, we want to exit with the
+ # GitBisectionCode enum.
+ sys.exit(int(error_code))
+
+ dispatch_table = {
+ FailureModes.FAIL: _apply_all,
+ FailureModes.CONTINUE: _apply_all,
+ FailureModes.REMOVE_PATCHES: _remove,
+ FailureModes.DISABLE_PATCHES: _disable,
+ FailureModes.BISECT_PATCHES: _test_single,
+ }
+
+ if args_output.failure_mode in dispatch_table:
+ dispatch_table[args_output.failure_mode](args_output)
+
+
+if __name__ == "__main__":
+ main(sys.argv[1:])
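
To round out the rewrite: the old monolithic HandlePatches is gone, and each
failure mode now dispatches to a small helper backed by patch_utils. A
hypothetical apply-all run, with placeholder paths and revision, equivalent to
invoking the script with the same flags:

```python
import patch_manager

# Applies every applicable patch at the given revision, reporting (rather
# than aborting on) failures because of --failure_mode=continue.
patch_manager.main(
    [
        "--svn_version", "123456",
        "--src_path", "/path/to/llvm-project",
        "--patch_metadata_file", "/path/to/files/PATCHES.json",
        "--failure_mode", "continue",
    ]
)
```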
diff --git a/llvm_tools/patch_manager_unittest.py b/llvm_tools/patch_manager_unittest.py
index 69bb683e..42697d91 100755
--- a/llvm_tools/patch_manager_unittest.py
+++ b/llvm_tools/patch_manager_unittest.py
@@ -1,955 +1,214 @@
#!/usr/bin/env python3
-# -*- coding: utf-8 -*-
-# Copyright 2019 The Chromium OS Authors. All rights reserved.
+# Copyright 2019 The ChromiumOS Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Unit tests when handling patches."""
-from __future__ import print_function
-
import json
-import os
-import subprocess
+from pathlib import Path
+import tempfile
+from typing import Callable
import unittest
-import unittest.mock as mock
+from unittest import mock
import patch_manager
-from failure_modes import FailureModes
-from test_helpers import CallCountsToMockFunctions
-from test_helpers import CreateTemporaryJsonFile
-from test_helpers import WritePrettyJsonFile
+import patch_utils
class PatchManagerTest(unittest.TestCase):
- """Test class when handling patches of packages."""
-
- # Simulate behavior of 'os.path.isdir()' when the path is not a directory.
- @mock.patch.object(os.path, 'isdir', return_value=False)
- def testInvalidDirectoryPassedAsCommandLineArgument(self, mock_isdir):
- test_dir = '/some/path/that/is/not/a/directory'
-
- # Verify the exception is raised when the command line argument for
- # '--filesdir_path' or '--src_path' is not a directory.
- with self.assertRaises(ValueError) as err:
- patch_manager.is_directory(test_dir)
-
- self.assertEqual(
- str(err.exception), 'Path is not a directory: '
- '%s' % test_dir)
-
- mock_isdir.assert_called_once()
-
- # Simulate the behavior of 'os.path.isdir()' when a path to a directory is
- # passed as the command line argument for '--filesdir_path' or '--src_path'.
- @mock.patch.object(os.path, 'isdir', return_value=True)
- def testValidDirectoryPassedAsCommandLineArgument(self, mock_isdir):
- test_dir = '/some/path/that/is/a/directory'
-
- self.assertEqual(patch_manager.is_directory(test_dir), test_dir)
-
- mock_isdir.assert_called_once()
-
- # Simulate behavior of 'os.path.isfile()' when the patch metadata file is does
- # not exist.
- @mock.patch.object(os.path, 'isfile', return_value=False)
- def testInvalidPathToPatchMetadataFilePassedAsCommandLineArgument(
- self, mock_isfile):
-
- abs_path_to_patch_file = '/abs/path/to/PATCHES.json'
-
- # Verify the exception is raised when the command line argument for
- # '--patch_metadata_file' does not exist or is not a file.
- with self.assertRaises(ValueError) as err:
- patch_manager.is_patch_metadata_file(abs_path_to_patch_file)
-
- self.assertEqual(
- str(err.exception), 'Invalid patch metadata file provided: '
- '%s' % abs_path_to_patch_file)
-
- mock_isfile.assert_called_once()
-
- # Simulate the behavior of 'os.path.isfile()' when the path to the patch
- # metadata file exists and is a file.
- @mock.patch.object(os.path, 'isfile', return_value=True)
- def testPatchMetadataFileDoesNotEndInJson(self, mock_isfile):
- abs_path_to_patch_file = '/abs/path/to/PATCHES'
-
- # Verify the exception is raises when the command line argument for
- # '--patch_metadata_file' exists and is a file but does not end in
- # '.json'.
- with self.assertRaises(ValueError) as err:
- patch_manager.is_patch_metadata_file(abs_path_to_patch_file)
-
- self.assertEqual(
- str(err.exception), 'Patch metadata file does not end in ".json": '
- '%s' % abs_path_to_patch_file)
-
- mock_isfile.assert_called_once()
-
- # Simulate the behavior of 'os.path.isfile()' when the command line argument
- # for '--patch_metadata_file' exists and is a file.
- @mock.patch.object(os.path, 'isfile', return_value=True)
- def testValidPatchMetadataFilePassedAsCommandLineArgument(self, mock_isfile):
- abs_path_to_patch_file = '/abs/path/to/PATCHES.json'
-
- self.assertEqual(
- patch_manager.is_patch_metadata_file(abs_path_to_patch_file),
- '%s' % abs_path_to_patch_file)
-
- mock_isfile.assert_called_once()
-
- # Simulate behavior of 'os.path.isdir()' when the path to $FILESDIR
- # does not exist.
- @mock.patch.object(os.path, 'isdir', return_value=False)
- def testInvalidPathToFilesDirWhenConstructingPathToPatch(self, mock_isdir):
- abs_path_to_filesdir = '/abs/path/to/filesdir'
-
- rel_patch_path = 'cherry/fixes_stdout.patch'
-
- # Verify the exception is raised when the the absolute path to $FILESDIR of
- # a package is not a directory.
- with self.assertRaises(ValueError) as err:
- patch_manager.GetPathToPatch(abs_path_to_filesdir, rel_patch_path)
-
- self.assertEqual(
- str(err.exception), 'Invalid path to $FILESDIR provided: '
- '%s' % abs_path_to_filesdir)
-
- mock_isdir.assert_called_once()
-
- # Simulate behavior of 'os.path.isdir()' when the absolute path to the
- # $FILESDIR of a package exists and is a directory.
- @mock.patch.object(os.path, 'isdir', return_value=True)
- # Simulate the behavior of 'os.path.isfile()' when the absolute path to the
- # patch does not exist.
- @mock.patch.object(os.path, 'isfile', return_value=False)
- def testConstructedPathToPatchDoesNotExist(self, mock_isfile, mock_isdir):
- abs_path_to_filesdir = '/abs/path/to/filesdir'
-
- rel_patch_path = 'cherry/fixes_stdout.patch'
-
- abs_patch_path = os.path.join(abs_path_to_filesdir, rel_patch_path)
-
- # Verify the exception is raised when the absolute path to the patch does
+ """Test class when handling patches of packages."""
+
+ # Simulate behavior of 'os.path.isdir()' when the path is not a directory.
+ @mock.patch.object(Path, "is_dir", return_value=False)
+ def testInvalidDirectoryPassedAsCommandLineArgument(self, mock_isdir):
+ src_dir = "/some/path/that/is/not/a/directory"
+ patch_metadata_file = "/some/path/that/is/not/a/file"
+
+ # Verify the exception is raised when the command line argument for
+        # '--src_path' is not a directory.
+ with self.assertRaises(ValueError):
+ patch_manager.main(
+ [
+ "--src_path",
+ src_dir,
+ "--patch_metadata_file",
+ patch_metadata_file,
+ ]
+ )
+ mock_isdir.assert_called_once()
+
+    # Simulate behavior of 'os.path.isfile()' when the patch metadata file does
# not exist.
- with self.assertRaises(ValueError) as err:
- patch_manager.GetPathToPatch(abs_path_to_filesdir, rel_patch_path)
-
- self.assertEqual(
- str(err.exception), 'The absolute path %s to the patch %s does not '
- 'exist' % (abs_patch_path, rel_patch_path))
-
- mock_isdir.assert_called_once()
-
- mock_isfile.assert_called_once()
-
- # Simulate behavior of 'os.path.isdir()' when the absolute path to the
- # $FILESDIR of a package exists and is a directory.
- @mock.patch.object(os.path, 'isdir', return_value=True)
- # Simulate behavior of 'os.path.isfile()' when the absolute path to the
- # patch exists and is a file.
- @mock.patch.object(os.path, 'isfile', return_value=True)
- def testConstructedPathToPatchSuccessfully(self, mock_isfile, mock_isdir):
- abs_path_to_filesdir = '/abs/path/to/filesdir'
-
- rel_patch_path = 'cherry/fixes_stdout.patch'
-
- abs_patch_path = os.path.join(abs_path_to_filesdir, rel_patch_path)
-
- self.assertEqual(
- patch_manager.GetPathToPatch(abs_path_to_filesdir, rel_patch_path),
- abs_patch_path)
-
- mock_isdir.assert_called_once()
-
- mock_isfile.assert_called_once()
-
- def testSuccessfullyGetPatchMetadataForPatchWithNoMetadata(self):
- expected_patch_metadata = 0, None, False
-
- test_patch = {
- 'comment': 'Redirects output to stdout',
- 'rel_patch_path': 'cherry/fixes_stdout.patch'
- }
-
- self.assertEqual(
- patch_manager.GetPatchMetadata(test_patch), expected_patch_metadata)
-
- def testSuccessfullyGetPatchMetdataForPatchWithSomeMetadata(self):
- expected_patch_metadata = 0, 1000, False
-
- test_patch = {
- 'comment': 'Redirects output to stdout',
- 'rel_patch_path': 'cherry/fixes_stdout.patch',
- 'version_range': {
- 'until': 1000,
- }
- }
-
- self.assertEqual(
- patch_manager.GetPatchMetadata(test_patch), expected_patch_metadata)
-
- def testFailedToApplyPatchWhenInvalidSrcPathIsPassedIn(self):
- src_path = '/abs/path/to/src'
-
- abs_patch_path = '/abs/path/to/filesdir/cherry/fixes_stdout.patch'
-
- # Verify the exception is raised when the absolute path to the unpacked
- # sources of a package is not a directory.
- with self.assertRaises(ValueError) as err:
- patch_manager.ApplyPatch(src_path, abs_patch_path)
-
- self.assertEqual(
- str(err.exception), 'Invalid src path provided: %s' % src_path)
-
- # Simulate behavior of 'os.path.isdir()' when the absolute path to the
- # unpacked sources of the package is valid and exists.
- @mock.patch.object(os.path, 'isdir', return_value=True)
- def testFailedToApplyPatchWhenPatchPathIsInvalid(self, mock_isdir):
- src_path = '/abs/path/to/src'
-
- abs_patch_path = '/abs/path/to/filesdir/cherry/fixes_stdout.patch'
-
- # Verify the exception is raised when the absolute path to the patch does
- # not exist or is not a file.
- with self.assertRaises(ValueError) as err:
- patch_manager.ApplyPatch(src_path, abs_patch_path)
-
- self.assertEqual(
- str(err.exception), 'Invalid patch file provided: '
- '%s' % abs_patch_path)
-
- mock_isdir.assert_called_once()
-
- # Simulate behavior of 'os.path.isdir()' when the absolute path to the
- # unpacked sources of the package is valid and exists.
- @mock.patch.object(os.path, 'isdir', return_value=True)
- @mock.patch.object(os.path, 'isfile', return_value=True)
- # Simulate behavior of 'os.path.isfile()' when the absolute path to the
- # patch exists and is a file.
- @mock.patch.object(patch_manager, 'check_output')
- def testFailedToApplyPatchInDryRun(self, mock_dry_run, mock_isfile,
- mock_isdir):
-
- # Simulate behavior of 'subprocess.check_output()' when '--dry-run'
- # fails on the applying patch.
- def FailedToApplyPatch(test_patch_cmd):
- # First argument is the return error code, the second argument is the
- # command that was run, and the third argument is the output.
- raise subprocess.CalledProcessError(1, test_patch_cmd, None)
-
- mock_dry_run.side_effect = FailedToApplyPatch
-
- src_path = '/abs/path/to/src'
-
- abs_patch_path = '/abs/path/to/filesdir/cherry/fixes_stdout.patch'
-
- self.assertEqual(patch_manager.ApplyPatch(src_path, abs_patch_path), False)
-
- mock_isdir.assert_called_once()
-
- mock_isfile.assert_called_once()
-
- mock_dry_run.assert_called_once()
-
- # Simulate behavior of 'os.path.isdir()' when the absolute path to the
- # unpacked sources of the package is valid and exists.
- @mock.patch.object(os.path, 'isdir', return_value=True)
- @mock.patch.object(os.path, 'isfile', return_value=True)
- # Simulate behavior of 'os.path.isfile()' when the absolute path to the
- # patch exists and is a file.
- @mock.patch.object(patch_manager, 'check_output')
- def testSuccessfullyAppliedPatch(self, mock_dry_run, mock_isfile, mock_isdir):
- src_path = '/abs/path/to/src'
-
- abs_patch_path = '/abs/path/to/filesdir/cherry/fixes_stdout.patch'
-
- self.assertEqual(patch_manager.ApplyPatch(src_path, abs_patch_path), True)
-
- mock_isdir.assert_called_once()
-
- mock_isfile.assert_called_once()
-
- self.assertEqual(mock_dry_run.call_count, 2)
-
- def testFailedToUpdatePatchMetadataFileWhenPatchFileNotEndInJson(self):
- patch = [{
- 'comment': 'Redirects output to stdout',
- 'rel_patch_path': 'cherry/fixes_output.patch',
- 'version_range': {
- 'from': 10,
- },
- }]
-
- abs_patch_path = '/abs/path/to/filesdir/PATCHES'
-
- # Verify the exception is raised when the absolute path to the patch
- # metadata file does not end in '.json'.
- with self.assertRaises(ValueError) as err:
- patch_manager.UpdatePatchMetadataFile(abs_patch_path, patch)
-
- self.assertEqual(
- str(err.exception), 'File does not end in ".json": '
- '%s' % abs_patch_path)
-
- def testSuccessfullyUpdatedPatchMetadataFile(self):
- test_updated_patch_metadata = [{
- 'comment': 'Redirects output to stdout',
- 'rel_patch_path': 'cherry/fixes_output.patch',
- 'version_range': {
- 'from': 10,
- }
- }]
-
- expected_patch_metadata = {
- 'comment': 'Redirects output to stdout',
- 'rel_patch_path': 'cherry/fixes_output.patch',
- 'version_range': {
- 'from': 10,
- }
- }
-
- with CreateTemporaryJsonFile() as json_test_file:
- patch_manager.UpdatePatchMetadataFile(json_test_file,
- test_updated_patch_metadata)
-
- # Make sure the updated patch metadata was written into the temporary
- # .json file.
- with open(json_test_file) as patch_file:
- patch_contents = json.load(patch_file)
-
- self.assertEqual(len(patch_contents), 1)
-
- self.assertDictEqual(patch_contents[0], expected_patch_metadata)
-
- @mock.patch.object(patch_manager, 'GetPathToPatch')
- def testExceptionThrownWhenHandlingPatches(self, mock_get_path_to_patch):
- filesdir_path = '/abs/path/to/filesdir'
-
- abs_patch_path = '/abs/path/to/filesdir/cherry/fixes_output.patch'
-
- rel_patch_path = 'cherry/fixes_output.patch'
-
- # Simulate behavior of 'GetPathToPatch()' when the absolute path to the
- # patch does not exist.
- def PathToPatchDoesNotExist(filesdir_path, rel_patch_path):
- raise ValueError('The absolute path to %s does not exist' % os.path.join(
- filesdir_path, rel_patch_path))
-
- # Use the test function to simulate the behavior of 'GetPathToPatch()'.
- mock_get_path_to_patch.side_effect = PathToPatchDoesNotExist
-
- test_patch_metadata = [{
- 'comment': 'Redirects output to stdout',
- 'rel_patch_path': rel_patch_path,
- 'version_range': {
- 'from': 10,
- }
- }]
-
- with CreateTemporaryJsonFile() as json_test_file:
- # Write the test patch metadata to the temporary .json file.
- with open(json_test_file, 'w') as json_file:
- WritePrettyJsonFile(test_patch_metadata, json_file)
-
- src_path = '/some/path/to/src'
-
- revision = 1000
-
- # Verify the exception is raised when the absolute path to a patch does
- # not exist.
- with self.assertRaises(ValueError) as err:
- patch_manager.HandlePatches(revision, json_test_file, filesdir_path,
- src_path, FailureModes.FAIL)
-
- self.assertEqual(
- str(err.exception),
- 'The absolute path to %s does not exist' % abs_patch_path)
-
- mock_get_path_to_patch.assert_called_once_with(filesdir_path,
- rel_patch_path)
-
- @mock.patch.object(patch_manager, 'GetPathToPatch')
- # Simulate behavior for 'ApplyPatch()' when an applicable patch failed to
- # apply.
- @mock.patch.object(patch_manager, 'ApplyPatch', return_value=False)
- def testExceptionThrownOnAFailedPatchInFailMode(self, mock_apply_patch,
- mock_get_path_to_patch):
- filesdir_path = '/abs/path/to/filesdir'
-
- abs_patch_path = '/abs/path/to/filesdir/cherry/fixes_output.patch'
-
- rel_patch_path = 'cherry/fixes_output.patch'
-
- # Simulate behavior for 'GetPathToPatch()' when successfully constructed the
- # absolute path to the patch and the patch exists.
- mock_get_path_to_patch.return_value = abs_patch_path
-
- test_patch_metadata = [{
- 'comment': 'Redirects output to stdout',
- 'rel_patch_path': rel_patch_path,
- 'version_range': {
- 'from': 1000,
- },
- }]
-
- with CreateTemporaryJsonFile() as json_test_file:
- # Write the test patch metadata to the temporary .json file.
- with open(json_test_file, 'w') as json_file:
- WritePrettyJsonFile(test_patch_metadata, json_file)
-
- src_path = '/some/path/to/src'
-
- revision = 1000
-
- patch_name = 'fixes_output.patch'
-
- # Verify the exception is raised when the mode is 'fail' and an applicable
- # patch fails to apply.
- with self.assertRaises(ValueError) as err:
- patch_manager.HandlePatches(revision, json_test_file, filesdir_path,
- src_path, FailureModes.FAIL)
-
- self.assertEqual(
- str(err.exception), 'Failed to apply patch: %s' % patch_name)
-
- mock_get_path_to_patch.assert_called_once_with(filesdir_path,
- rel_patch_path)
-
- mock_apply_patch.assert_called_once_with(src_path, abs_patch_path)
-
- @mock.patch.object(patch_manager, 'GetPathToPatch')
- @mock.patch.object(patch_manager, 'ApplyPatch')
- def testSomePatchesFailedToApplyInContinueMode(self, mock_apply_patch,
- mock_get_path_to_patch):
-
- test_patch_1 = {
- 'comment': 'Redirects output to stdout',
- 'rel_patch_path': 'cherry/fixes_output.patch',
- 'version_range': {
- 'from': 1000,
- 'until': 1250
- }
- }
-
- test_patch_2 = {
- 'comment': 'Fixes input',
- 'rel_patch_path': 'cherry/fixes_input.patch',
- 'version_range': {
- 'from': 1000
- }
- }
-
- test_patch_3 = {
- 'comment': 'Adds a warning',
- 'rel_patch_path': 'add_warning.patch',
- 'version_range': {
- 'from': 750,
- 'until': 1500
- }
- }
-
- test_patch_4 = {
- 'comment': 'Adds a helper function',
- 'rel_patch_path': 'add_helper.patch',
- 'version_range': {
- 'from': 20,
- 'until': 900
- }
- }
-
- test_patch_metadata = [
- test_patch_1, test_patch_2, test_patch_3, test_patch_4
- ]
-
- abs_path_to_filesdir = '/abs/path/to/filesdir'
-
- # Simulate behavior for 'GetPathToPatch()' when successfully constructed the
- # absolute path to the patch and the patch exists.
- @CallCountsToMockFunctions
- def MultipleCallsToGetPatchPath(call_count, filesdir_path, rel_patch_path):
- self.assertEqual(filesdir_path, abs_path_to_filesdir)
-
- if call_count < 4:
- self.assertEqual(rel_patch_path,
- test_patch_metadata[call_count]['rel_patch_path'])
-
- return os.path.join(abs_path_to_filesdir,
- test_patch_metadata[call_count]['rel_patch_path'])
-
- assert False, 'Unexpectedly called more than 4 times.'
-
- # Simulate behavior for 'ApplyPatch()' when applying multiple applicable
- # patches.
- @CallCountsToMockFunctions
- def MultipleCallsToApplyPatches(call_count, _src_path, path_to_patch):
- if call_count < 3:
- self.assertEqual(
- path_to_patch,
- os.path.join(abs_path_to_filesdir,
- test_patch_metadata[call_count]['rel_patch_path']))
-
- # Simulate that the first patch successfully applied.
- return call_count == 0
-
- # 'ApplyPatch()' was called more times than expected (3 times).
- assert False, 'Unexpectedly called more than 3 times.'
-
- # Use test functions to simulate behavior.
- mock_get_path_to_patch.side_effect = MultipleCallsToGetPatchPath
- mock_apply_patch.side_effect = MultipleCallsToApplyPatches
-
- expected_applied_patches = ['fixes_output.patch']
- expected_failed_patches = ['fixes_input.patch', 'add_warning.patch']
- expected_non_applicable_patches = ['add_helper.patch']
-
- expected_patch_info_dict = {
- 'applied_patches': expected_applied_patches,
- 'failed_patches': expected_failed_patches,
- 'non_applicable_patches': expected_non_applicable_patches,
- 'disabled_patches': [],
- 'removed_patches': [],
- 'modified_metadata': None
- }
-
- with CreateTemporaryJsonFile() as json_test_file:
- # Write the test patch metadata to the temporary .json file.
- with open(json_test_file, 'w') as json_file:
- WritePrettyJsonFile(test_patch_metadata, json_file)
-
- src_path = '/some/path/to/src/'
-
- revision = 1000
-
- patch_info = patch_manager.HandlePatches(revision, json_test_file,
- abs_path_to_filesdir, src_path,
- FailureModes.CONTINUE)
-
- self.assertDictEqual(patch_info._asdict(), expected_patch_info_dict)
-
- self.assertEqual(mock_get_path_to_patch.call_count, 4)
-
- self.assertEqual(mock_apply_patch.call_count, 3)
-
- @mock.patch.object(patch_manager, 'GetPathToPatch')
- @mock.patch.object(patch_manager, 'ApplyPatch')
- def testSomePatchesAreDisabled(self, mock_apply_patch,
- mock_get_path_to_patch):
-
- test_patch_1 = {
- 'comment': 'Redirects output to stdout',
- 'rel_patch_path': 'cherry/fixes_output.patch',
- 'version_range': {
- 'from': 1000,
- 'until': 1190
- }
- }
-
- test_patch_2 = {
- 'comment': 'Fixes input',
- 'rel_patch_path': 'cherry/fixes_input.patch',
- 'version_range': {
- 'from': 1000
- }
- }
-
- test_patch_3 = {
- 'comment': 'Adds a warning',
- 'rel_patch_path': 'add_warning.patch',
- 'version_range': {
- 'from': 750,
- 'until': 1500
- }
- }
-
- test_patch_4 = {
- 'comment': 'Adds a helper function',
- 'rel_patch_path': 'add_helper.patch',
- 'version_range': {
- 'from': 20,
- 'until': 2000
- }
- }
-
- test_patch_metadata = [
- test_patch_1, test_patch_2, test_patch_3, test_patch_4
- ]
-
- abs_path_to_filesdir = '/abs/path/to/filesdir'
-
- # Simulate behavior for 'GetPathToPatch()' when successfully constructed the
- # absolute path to the patch and the patch exists.
- @CallCountsToMockFunctions
- def MultipleCallsToGetPatchPath(call_count, filesdir_path, rel_patch_path):
- self.assertEqual(filesdir_path, abs_path_to_filesdir)
-
- if call_count < 4:
- self.assertEqual(rel_patch_path,
- test_patch_metadata[call_count]['rel_patch_path'])
-
- return os.path.join(abs_path_to_filesdir,
- test_patch_metadata[call_count]['rel_patch_path'])
-
- # 'GetPathToPatch()' was called more times than expected (4 times).
- assert False, 'Unexpectedly called more than 4 times.'
-
- # Simulate behavior for 'ApplyPatch()' when applying multiple applicable
- # patches.
- @CallCountsToMockFunctions
- def MultipleCallsToApplyPatches(call_count, _src_path, path_to_patch):
- if call_count < 3:
- self.assertEqual(
- path_to_patch,
- os.path.join(abs_path_to_filesdir,
- test_patch_metadata[call_count + 1]['rel_patch_path']))
-
- # Simulate that the second patch applied successfully.
- return call_count == 1
-
- # 'ApplyPatch()' was called more times than expected (3 times).
- assert False, 'Unexpectedly called more than 3 times.'
-
- # Use test functions to simulate behavior.
- mock_get_path_to_patch.side_effect = MultipleCallsToGetPatchPath
- mock_apply_patch.side_effect = MultipleCallsToApplyPatches
-
- expected_applied_patches = ['add_warning.patch']
- expected_failed_patches = ['fixes_input.patch', 'add_helper.patch']
- expected_disabled_patches = ['fixes_input.patch', 'add_helper.patch']
- expected_non_applicable_patches = ['fixes_output.patch']
-
- # Assigned 'None' for now, but it is expected that the patch metadata file
- # will be modified, so the 'expected_patch_info_dict's' value for the
- # key 'modified_metadata' will get updated to the temporary .json file once
- # the file is created.
- expected_modified_metadata_file = None
-
- expected_patch_info_dict = {
- 'applied_patches': expected_applied_patches,
- 'failed_patches': expected_failed_patches,
- 'non_applicable_patches': expected_non_applicable_patches,
- 'disabled_patches': expected_disabled_patches,
- 'removed_patches': [],
- 'modified_metadata': expected_modified_metadata_file
- }
-
- with CreateTemporaryJsonFile() as json_test_file:
- # Write the test patch metadata to the temporary .json file.
- with open(json_test_file, 'w') as json_file:
- WritePrettyJsonFile(test_patch_metadata, json_file)
-
- expected_patch_info_dict['modified_metadata'] = json_test_file
-
- src_path = '/some/path/to/src/'
-
- revision = 1200
-
- patch_info = patch_manager.HandlePatches(revision, json_test_file,
- abs_path_to_filesdir, src_path,
- FailureModes.DISABLE_PATCHES)
-
- self.assertDictEqual(patch_info._asdict(), expected_patch_info_dict)
-
- # 'test_patch_1' and 'test_patch_3' were not modified/disabled, so their
- # dictionary is the same, but 'test_patch_2' and 'test_patch_4' were
- # disabled, so their 'end_version' would be set to 1200, which was the
- # value passed into 'HandlePatches()' for the 'svn_version'.
- test_patch_2['end_version'] = 1200
- test_patch_4['end_version'] = 1200
-
- expected_json_file = [
- test_patch_1, test_patch_2, test_patch_3, test_patch_4
- ]
-
- # Make sure the updated patch metadata was written into the temporary
- # .json file.
- with open(json_test_file) as patch_file:
- new_json_file_contents = json.load(patch_file)
-
- self.assertListEqual(new_json_file_contents, expected_json_file)
-
- self.assertEqual(mock_get_path_to_patch.call_count, 4)
-
- self.assertEqual(mock_apply_patch.call_count, 3)
-
- @mock.patch.object(patch_manager, 'GetPathToPatch')
- @mock.patch.object(patch_manager, 'ApplyPatch')
- def testSomePatchesAreRemoved(self, mock_apply_patch, mock_get_path_to_patch):
- # For the 'remove_patches' mode, this patch is expected to be in the
- # 'non_applicable_patches' list and 'removed_patches' list because
- # the 'svn_version' (1500) >= 'end_version' (1190).
- test_patch_1 = {
- 'comment': 'Redirects output to stdout',
- 'rel_patch_path': 'cherry/fixes_output.patch',
- 'version_range': {
- 'from': 1000,
- 'until': 1190
- }
- }
-
- # For the 'remove_patches' mode, this patch is expected to be in the
- # 'applicable_patches' list (which is the list that the .json file will be
- # updated with) because the 'svn_version' < 'inf' (this patch does not have
- # an 'end_version' value which implies 'end_version' == 'inf').
- test_patch_2 = {
- 'comment': 'Fixes input',
- 'rel_patch_path': 'cherry/fixes_input.patch',
- 'version_range': {
- 'from': 1000
- }
- }
-
- # For the 'remove_patches' mode, this patch is expected to be in the
- # 'non_applicable_patches' list and 'removed_patches' list because
- # the 'svn_version' (1500) >= 'end_version' (1500).
- test_patch_3 = {
- 'comment': 'Adds a warning',
- 'rel_patch_path': 'add_warning.patch',
- 'version_range': {
- 'from': 750,
- 'until': 1500
- }
- }
-
- # For the 'remove_patches' mode, this patch is expected to be in the
- # 'non_applicable_patches' list and 'removed_patches' list because
- # the 'svn_version' (1500) >= 'end_version' (1400).
- test_patch_4 = {
- 'comment': 'Adds a helper function',
- 'rel_patch_path': 'add_helper.patch',
- 'version_range': {
- 'from': 20,
- 'until': 1400
- }
- }
-
- test_patch_metadata = [
- test_patch_1, test_patch_2, test_patch_3, test_patch_4
- ]
-
- abs_path_to_filesdir = '/abs/path/to/filesdir'
-
- # Simulate the behavior of 'GetPathToPatch()' when it successfully constructs
- # the absolute path to the patch and the patch exists.
- @CallCountsToMockFunctions
- def MultipleCallsToGetPatchPath(call_count, filesdir_path, rel_patch_path):
- self.assertEqual(filesdir_path, abs_path_to_filesdir)
-
- if call_count < 4:
- self.assertEqual(rel_patch_path,
- test_patch_metadata[call_count]['rel_patch_path'])
-
- return os.path.join(abs_path_to_filesdir,
- test_patch_metadata[call_count]['rel_patch_path'])
-
- assert False, 'Unexpectedly called more than 4 times.'
-
- # Use the test function to simulate behavior of 'GetPathToPatch()'.
- mock_get_path_to_patch.side_effect = MultipleCallsToGetPatchPath
-
- expected_applied_patches = []
- expected_failed_patches = []
- expected_disabled_patches = []
- expected_non_applicable_patches = [
- 'fixes_output.patch', 'add_warning.patch', 'add_helper.patch'
- ]
- expected_removed_patches = [
- '/abs/path/to/filesdir/cherry/fixes_output.patch',
- '/abs/path/to/filesdir/add_warning.patch',
- '/abs/path/to/filesdir/add_helper.patch'
- ]
-
- # Assigned 'None' for now, but it is expected that the patch metadata file
- # will be modified, so the 'expected_patch_info_dict's' value for the
- # key 'modified_metadata' will get updated to the temporary .json file once
- # the file is created.
- expected_modified_metadata_file = None
-
- expected_patch_info_dict = {
- 'applied_patches': expected_applied_patches,
- 'failed_patches': expected_failed_patches,
- 'non_applicable_patches': expected_non_applicable_patches,
- 'disabled_patches': expected_disabled_patches,
- 'removed_patches': expected_removed_patches,
- 'modified_metadata': expected_modified_metadata_file
- }
-
- with CreateTemporaryJsonFile() as json_test_file:
- # Write the test patch metadata to the temporary .json file.
- with open(json_test_file, 'w') as json_file:
- WritePrettyJsonFile(test_patch_metadata, json_file)
-
- expected_patch_info_dict['modified_metadata'] = json_test_file
-
- abs_path_to_filesdir = '/abs/path/to/filesdir'
-
- src_path = '/some/path/to/src/'
-
- revision = 1500
-
- patch_info = patch_manager.HandlePatches(revision, json_test_file,
- abs_path_to_filesdir, src_path,
- FailureModes.REMOVE_PATCHES)
-
- self.assertDictEqual(patch_info._asdict(), expected_patch_info_dict)
-
- # 'test_patch_2' was an applicable patch, so this patch will be the only
- # patch that is in the temporary .json file. The other patches were not
- # applicable (they failed the applicable check), so they will not be in
- # the .json file.
- expected_json_file = [test_patch_2]
-
- # Make sure the updated patch metadata was written into the temporary
- # .json file.
- with open(json_test_file) as patch_file:
- new_json_file_contents = json.load(patch_file)
-
- self.assertListEqual(new_json_file_contents, expected_json_file)
-
- self.assertEqual(mock_get_path_to_patch.call_count, 4)
-
- mock_apply_patch.assert_not_called()
-
- @mock.patch.object(patch_manager, 'GetPathToPatch')
- @mock.patch.object(patch_manager, 'ApplyPatch')
- def testSuccessfullyDidNotRemoveAFuturePatch(self, mock_apply_patch,
- mock_get_path_to_patch):
-
- # For the 'remove_patches' mode, this patch is expected to be in the
- # 'non_applicable_patches' list and 'removed_patches' list because
- # the 'svn_version' (1200) >= 'end_version' (1190).
- test_patch_1 = {
- 'comment': 'Redirects output to stdout',
- 'rel_patch_path': 'cherry/fixes_output.patch',
- 'version_range': {
- 'from': 1000,
- 'until': 1190
- }
- }
-
- # For the 'remove_patches' mode, this patch is expected to be in the
- # 'applicable_patches' list (which is the list that the .json file will be
- # updated with) because the 'svn_version' < 'inf' (this patch does not have
- # an 'end_version' value which implies 'end_version' == 'inf').
- test_patch_2 = {
- 'comment': 'Fixes input',
- 'rel_patch_path': 'cherry/fixes_input.patch',
- 'version_range': {
- 'from': 1000,
- }
- }
-
- # For the 'remove_patches' mode, this patch is expected to be in the
- # 'applicable_patches' list because 'svn_version' >= 'start_version' and
- # 'svn_version' < 'end_version'.
- test_patch_3 = {
- 'comment': 'Adds a warning',
- 'rel_patch_path': 'add_warning.patch',
- 'version_range': {
- 'from': 750,
- 'until': 1500
- }
- }
-
- # For the 'remove_patches' mode, this patch is expected to be in the
- # 'applicable_patches' list because the patch is from the future (i.e.
- # 'start_version' > 'svn_version' (1200)), so it should NOT be removed.
- test_patch_4 = {
- 'comment': 'Adds a helper function',
- 'rel_patch_path': 'add_helper.patch',
- 'version_range': {
- 'from': 1600,
- 'until': 2000
- }
- }
-
- test_patch_metadata = [
- test_patch_1, test_patch_2, test_patch_3, test_patch_4
- ]
-
- abs_path_to_filesdir = '/abs/path/to/filesdir'
-
- # Simulate the behavior of 'GetPathToPatch()' when it successfully constructs
- # the absolute path to the patch and the patch exists.
- @CallCountsToMockFunctions
- def MultipleCallsToGetPatchPath(call_count, filesdir_path, rel_patch_path):
- self.assertEqual(filesdir_path, abs_path_to_filesdir)
-
- if call_count < 4:
- self.assertEqual(rel_patch_path,
- test_patch_metadata[call_count]['rel_patch_path'])
-
- return os.path.join(abs_path_to_filesdir,
- test_patch_metadata[call_count]['rel_patch_path'])
-
- # 'GetPathToPatch()' was called more times than expected (4 times).
- assert False, 'Unexpectedly called more than 4 times.'
-
- # Use the test function to simulate behavior of 'GetPathToPatch()'.
- mock_get_path_to_patch.side_effect = MultipleCallsToGetPatchPath
-
- expected_applied_patches = []
- expected_failed_patches = []
- expected_disabled_patches = []
-
- # 'add_helper.patch' is still a 'non applicable' patch, meaning it does not
- # apply in revision 1200, but it will NOT be removed because it is a future
- # patch.
- expected_non_applicable_patches = ['fixes_output.patch', 'add_helper.patch']
- expected_removed_patches = [
- '/abs/path/to/filesdir/cherry/fixes_output.patch'
- ]
-
- # Assigned 'None' for now, but it is expected that the patch metadata file
- # will be modified, so the 'expected_patch_info_dict's' value for the
- # key 'modified_metadata' will get updated to the temporary .json file once
- # the file is created.
- expected_modified_metadata_file = None
-
- expected_patch_info_dict = {
- 'applied_patches': expected_applied_patches,
- 'failed_patches': expected_failed_patches,
- 'non_applicable_patches': expected_non_applicable_patches,
- 'disabled_patches': expected_disabled_patches,
- 'removed_patches': expected_removed_patches,
- 'modified_metadata': expected_modified_metadata_file
- }
-
- with CreateTemporaryJsonFile() as json_test_file:
- # Write the test patch metadata to the temporary .json file.
- with open(json_test_file, 'w') as json_file:
- WritePrettyJsonFile(test_patch_metadata, json_file)
-
- expected_patch_info_dict['modified_metadata'] = json_test_file
-
- src_path = '/some/path/to/src/'
-
- revision = 1200
-
- patch_info = patch_manager.HandlePatches(revision, json_test_file,
- abs_path_to_filesdir, src_path,
- FailureModes.REMOVE_PATCHES)
-
- self.assertDictEqual(patch_info._asdict(), expected_patch_info_dict)
-
- # 'test_patch_2', 'test_patch_3', and 'test_patch_4' were applicable
- # patches, so they will be the patches kept in the temporary .json file.
- # 'test_patch_1' was not applicable (it failed the applicable check and is
- # not a future patch), so it will not be in the .json file.
- expected_json_file = [test_patch_2, test_patch_3, test_patch_4]
-
- # Make sure the updated patch metadata was written into the temporary
- # .json file.
- with open(json_test_file) as patch_file:
- new_json_file_contents = json.load(patch_file)
-
- self.assertListEqual(new_json_file_contents, expected_json_file)
-
- self.assertEqual(mock_get_path_to_patch.call_count, 4)
-
- mock_apply_patch.assert_not_called()
-
-
-if __name__ == '__main__':
- unittest.main()
+ @mock.patch.object(Path, "is_file", return_value=False)
+ def testInvalidPathToPatchMetadataFilePassedAsCommandLineArgument(
+ self, mock_isfile
+ ):
+ src_dir = "/some/path/that/is/not/a/directory"
+ patch_metadata_file = "/some/path/that/is/not/a/file"
+
+ # Mock the directory checks to pass so that the exception is raised
+ # because the '--patch_metadata_file' argument is not a file.
+ with mock.patch.object(Path, "is_dir", return_value=True):
+ with self.assertRaises(ValueError):
+ patch_manager.main(
+ [
+ "--src_path",
+ src_dir,
+ "--patch_metadata_file",
+ patch_metadata_file,
+ ]
+ )
+ mock_isfile.assert_called_once()
+
+ @mock.patch("builtins.print")
+ @mock.patch.object(patch_utils, "git_clean_context")
+ def testCheckPatchApplies(self, mock_git_clean_context, _):
+ """Tests whether we can apply a single patch for a given svn_version."""
+ mock_git_clean_context.return_value = mock.MagicMock()
+ with tempfile.TemporaryDirectory(
+ prefix="patch_manager_unittest"
+ ) as dirname:
+ dirpath = Path(dirname)
+ patch_entries = [
+ patch_utils.PatchEntry(
+ dirpath,
+ metadata=None,
+ platforms=[],
+ rel_patch_path="another.patch",
+ version_range={
+ "from": 9,
+ "until": 20,
+ },
+ ),
+ patch_utils.PatchEntry(
+ dirpath,
+ metadata=None,
+ platforms=["chromiumos"],
+ rel_patch_path="example.patch",
+ version_range={
+ "from": 1,
+ "until": 10,
+ },
+ ),
+ patch_utils.PatchEntry(
+ dirpath,
+ metadata=None,
+ platforms=["chromiumos"],
+ rel_patch_path="patch_after.patch",
+ version_range={
+ "from": 1,
+ "until": 5,
+ },
+ ),
+ ]
+ patches_path = dirpath / "PATCHES.json"
+ with patch_utils.atomic_write(patches_path, encoding="utf-8") as f:
+ json.dump([pe.to_dict() for pe in patch_entries], f)
+
+ def _harness1(
+ version: int,
+ return_value: patch_utils.PatchResult,
+ expected: patch_manager.GitBisectionCode,
+ ):
+ with mock.patch.object(
+ patch_utils.PatchEntry,
+ "apply",
+ return_value=return_value,
+ ) as m:
+ result = patch_manager.CheckPatchApplies(
+ version,
+ dirpath,
+ patches_path,
+ "example.patch",
+ )
+ self.assertEqual(result, expected)
+ m.assert_called()
+
+ _harness1(
+ 1,
+ patch_utils.PatchResult(True, {}),
+ patch_manager.GitBisectionCode.GOOD,
+ )
+ _harness1(
+ 2,
+ patch_utils.PatchResult(True, {}),
+ patch_manager.GitBisectionCode.GOOD,
+ )
+ _harness1(
+ 2,
+ patch_utils.PatchResult(False, {}),
+ patch_manager.GitBisectionCode.BAD,
+ )
+ _harness1(
+ 11,
+ patch_utils.PatchResult(False, {}),
+ patch_manager.GitBisectionCode.BAD,
+ )
+
+ def _harness2(
+ version: int,
+ application_func: Callable,
+ expected: patch_manager.GitBisectionCode,
+ ):
+ with mock.patch.object(
+ patch_utils,
+ "apply_single_patch_entry",
+ application_func,
+ ):
+ result = patch_manager.CheckPatchApplies(
+ version,
+ dirpath,
+ patches_path,
+ "example.patch",
+ )
+ self.assertEqual(result, expected)
+
+ # Check that patch application success/failure maps to the expected return codes.
+ def _apply_patch_entry_mock1(v, _, patch_entry, **__):
+ return patch_entry.can_patch_version(v), None
+
+ _harness2(
+ 1,
+ _apply_patch_entry_mock1,
+ patch_manager.GitBisectionCode.GOOD,
+ )
+ _harness2(
+ 11,
+ _apply_patch_entry_mock1,
+ patch_manager.GitBisectionCode.BAD,
+ )
+
+ # Early exit check, shouldn't apply later failing patch.
+ def _apply_patch_entry_mock2(v, _, patch_entry, **__):
+ if (
+ patch_entry.can_patch_version(v)
+ and patch_entry.rel_patch_path == "patch_after.patch"
+ ):
+ return False, {"filename": mock.Mock()}
+ return True, None
+
+ _harness2(
+ 1,
+ _apply_patch_entry_mock2,
+ patch_manager.GitBisectionCode.GOOD,
+ )
+
+ # Skip check, should exit early on the first patch.
+ def _apply_patch_entry_mock3(v, _, patch_entry, **__):
+ if (
+ patch_entry.can_patch_version(v)
+ and patch_entry.rel_patch_path == "another.patch"
+ ):
+ return False, {"filename": mock.Mock()}
+ return True, None
+
+ _harness2(
+ 9,
+ _apply_patch_entry_mock3,
+ patch_manager.GitBisectionCode.SKIP,
+ )
+
+
+if __name__ == "__main__":
+ unittest.main()
diff --git a/llvm_tools/patch_sync/src/android_utils.rs b/llvm_tools/patch_sync/src/android_utils.rs
index 77cb4b8a..70bca189 100644
--- a/llvm_tools/patch_sync/src/android_utils.rs
+++ b/llvm_tools/patch_sync/src/android_utils.rs
@@ -1,3 +1,7 @@
+// Copyright 2022 The ChromiumOS Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
use std::path::Path;
use std::process::Command;
diff --git a/llvm_tools/patch_sync/src/main.rs b/llvm_tools/patch_sync/src/main.rs
index c244f1c0..a6c340be 100644
--- a/llvm_tools/patch_sync/src/main.rs
+++ b/llvm_tools/patch_sync/src/main.rs
@@ -1,3 +1,7 @@
+// Copyright 2022 The ChromiumOS Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
mod android_utils;
mod patch_parsing;
mod version_control;
@@ -9,7 +13,7 @@ use std::path::{Path, PathBuf};
use anyhow::{Context, Result};
use structopt::StructOpt;
-use patch_parsing::{filter_patches_by_platform, PatchCollection, PatchDictSchema};
+use patch_parsing::{filter_patches_by_platform, PatchCollection, PatchDictSchema, VersionRange};
use version_control::RepoSetupContext;
fn main() -> Result<()> {
@@ -133,13 +137,21 @@ fn transpose_subcmd(args: TransposeOpt) -> Result<()> {
let android_patches_path = ctx.android_patches_path();
// Get new Patches -------------------------------------------------------
- let (cur_cros_collection, new_cros_patches) = patch_parsing::new_patches(
+ let patch_parsing::PatchTemporalDiff {
+ cur_collection: cur_cros_collection,
+ new_patches: new_cros_patches,
+ version_updates: cros_version_updates,
+ } = patch_parsing::new_patches(
&cros_patches_path,
&ctx.old_cros_patch_contents(&args.old_cros_ref)?,
"chromiumos",
)
.context("finding new patches for chromiumos")?;
- let (cur_android_collection, new_android_patches) = patch_parsing::new_patches(
+ let patch_parsing::PatchTemporalDiff {
+ cur_collection: cur_android_collection,
+ new_patches: new_android_patches,
+ version_updates: android_version_updates,
+ } = patch_parsing::new_patches(
&android_patches_path,
&ctx.old_android_patch_contents(&args.old_android_ref)?,
"android",
@@ -164,7 +176,7 @@ fn transpose_subcmd(args: TransposeOpt) -> Result<()> {
})?
};
let new_android_patches = new_android_patches.filter_patches(|p| {
- match (p.get_start_version(), p.get_end_version()) {
+ match (p.get_from_version(), p.get_until_version()) {
(Some(start), Some(end)) => start <= android_llvm_version && android_llvm_version < end,
(Some(start), None) => start <= android_llvm_version,
(None, Some(end)) => android_llvm_version < end,
@@ -172,9 +184,17 @@ fn transpose_subcmd(args: TransposeOpt) -> Result<()> {
}
});
+ // Filter version updates down to patches that already exist on the other platform.
+ let cros_version_updates =
+ filter_version_changes(cros_version_updates, &cur_android_collection);
+ let android_version_updates =
+ filter_version_changes(android_version_updates, &cur_cros_collection);
+
if args.verbose {
- display_patches("New patches from Chromium OS", &new_cros_patches);
+ display_patches("New patches from ChromiumOS", &new_cros_patches);
+ display_version_updates("Version updates from ChromiumOS", &cros_version_updates);
display_patches("New patches from Android", &new_android_patches);
+ display_version_updates("Version updates from Android", &android_version_updates);
}
if args.dry_run {
@@ -188,9 +208,11 @@ fn transpose_subcmd(args: TransposeOpt) -> Result<()> {
ModifyOpt {
new_cros_patches,
cur_cros_collection,
+ cros_version_updates,
cros_reviewers: args.cros_reviewers,
new_android_patches,
cur_android_collection,
+ android_version_updates,
android_reviewers: args.android_reviewers,
},
)
@@ -199,9 +221,11 @@ fn transpose_subcmd(args: TransposeOpt) -> Result<()> {
struct ModifyOpt {
new_cros_patches: PatchCollection,
cur_cros_collection: PatchCollection,
+ cros_version_updates: Vec<(String, Option<VersionRange>)>,
cros_reviewers: Vec<String>,
new_android_patches: PatchCollection,
cur_android_collection: PatchCollection,
+ android_version_updates: Vec<(String, Option<VersionRange>)>,
android_reviewers: Vec<String>,
}
@@ -213,11 +237,16 @@ fn modify_repos(ctx: &RepoSetupContext, no_commit: bool, opt: ModifyOpt) -> Resu
// Transpose Patches -----------------------------------------------------
let mut cur_android_collection = opt.cur_android_collection;
let mut cur_cros_collection = opt.cur_cros_collection;
- if !opt.new_cros_patches.is_empty() {
+ // Apply any version ranges and new patches, then write out.
+ if !opt.new_cros_patches.is_empty() || !opt.cros_version_updates.is_empty() {
+ cur_android_collection =
+ cur_android_collection.update_version_ranges(&opt.cros_version_updates);
opt.new_cros_patches
.transpose_write(&mut cur_android_collection)?;
}
- if !opt.new_android_patches.is_empty() {
+ if !opt.new_android_patches.is_empty() || !opt.android_version_updates.is_empty() {
+ cur_cros_collection =
+ cur_cros_collection.update_version_ranges(&opt.android_version_updates);
opt.new_android_patches
.transpose_write(&mut cur_cros_collection)?;
}
@@ -246,6 +275,25 @@ fn modify_repos(ctx: &RepoSetupContext, no_commit: bool, opt: ModifyOpt) -> Resu
Ok(())
}
+/// Filter out version changes for patches that don't exist in the given collection.
+fn filter_version_changes<T>(
+ version_updates: T,
+ other_platform_collection: &PatchCollection,
+) -> Vec<(String, Option<VersionRange>)>
+where
+ T: IntoIterator<Item = (String, Option<VersionRange>)>,
+{
+ version_updates
+ .into_iter()
+ .filter(|(rel_patch_path, _)| {
+ other_platform_collection
+ .patches
+ .iter()
+ .any(|p| &p.rel_patch_path == rel_patch_path)
+ })
+ .collect()
+}
+
fn display_patches(prelude: &str, collection: &PatchCollection) {
println!("{}", prelude);
if collection.patches.is_empty() {
@@ -255,6 +303,17 @@ fn display_patches(prelude: &str, collection: &PatchCollection) {
println!("{}", collection);
}
+fn display_version_updates(prelude: &str, version_updates: &[(String, Option<VersionRange>)]) {
+ println!("{}", prelude);
+ if version_updates.is_empty() {
+ println!(" [No Version Changes]");
+ return;
+ }
+ for (rel_patch_path, _) in version_updates {
+ println!("* {}", rel_patch_path);
+ }
+}
+
#[derive(Debug, structopt::StructOpt)]
#[structopt(name = "patch_sync", about = "A pipeline for syncing the patch code")]
enum Opt {
@@ -281,7 +340,7 @@ enum Opt {
#[structopt(long = "cros-checkout", parse(from_os_str))]
cros_checkout_path: PathBuf,
- /// Emails to send review requests to during Chromium OS upload.
+ /// Emails to send review requests to during ChromiumOS upload.
/// Comma separated.
#[structopt(long = "cros-rev")]
cros_reviewers: Option<String>,
diff --git a/llvm_tools/patch_sync/src/patch_parsing.rs b/llvm_tools/patch_sync/src/patch_parsing.rs
index 124f0d6f..00153834 100644
--- a/llvm_tools/patch_sync/src/patch_parsing.rs
+++ b/llvm_tools/patch_sync/src/patch_parsing.rs
@@ -1,3 +1,7 @@
+// Copyright 2022 The ChromiumOS Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
use std::collections::{BTreeMap, BTreeSet};
use std::fs::{copy, File};
use std::io::{BufRead, BufReader, Read, Write};
@@ -8,42 +12,31 @@ use serde::{Deserialize, Serialize};
use sha2::{Digest, Sha256};
/// JSON serde struct.
-// FIXME(b/221489531): Remove when we clear out start_version and
-// end_version.
-#[derive(Debug, Clone, Serialize, Deserialize)]
+#[derive(Clone, Debug, Eq, PartialEq, Serialize, Deserialize)]
pub struct PatchDictSchema {
- /// [deprecated(since = "1.1", note = "Use version_range")]
- #[serde(skip_serializing_if = "Option::is_none")]
- pub end_version: Option<u64>,
pub metadata: Option<BTreeMap<String, serde_json::Value>>,
#[serde(default, skip_serializing_if = "BTreeSet::is_empty")]
pub platforms: BTreeSet<String>,
pub rel_patch_path: String,
- /// [deprecated(since = "1.1", note = "Use version_range")]
- #[serde(skip_serializing_if = "Option::is_none")]
- pub start_version: Option<u64>,
pub version_range: Option<VersionRange>,
}
-#[derive(Debug, Clone, Copy, Serialize, Deserialize)]
+#[derive(Clone, Copy, Debug, Eq, PartialEq, Serialize, Deserialize)]
pub struct VersionRange {
pub from: Option<u64>,
pub until: Option<u64>,
}
-// FIXME(b/221489531): Remove when we clear out start_version and
-// end_version.
impl PatchDictSchema {
- pub fn get_start_version(&self) -> Option<u64> {
- self.version_range
- .map(|x| x.from)
- .unwrap_or(self.start_version)
+ /// Return the first version this patch applies to.
+ pub fn get_from_version(&self) -> Option<u64> {
+ self.version_range.and_then(|x| x.from)
}
- pub fn get_end_version(&self) -> Option<u64> {
- self.version_range
- .map(|x| x.until)
- .unwrap_or(self.end_version)
+ /// Return the version after the last version this patch
+ /// applies to.
+ pub fn get_until_version(&self) -> Option<u64> {
+ self.version_range.and_then(|x| x.until)
}
}
@@ -129,6 +122,61 @@ impl PatchCollection {
)
}
+ /// Vec of (rel_patch_path, version_range) pairs for every patch whose
+ /// version range differs between `self` and `other` but which shares
+ /// the same rel_patch_path.
+ fn version_range_diffs(&self, other: &Self) -> Vec<(String, Option<VersionRange>)> {
+ let other_map: BTreeMap<_, _> = other
+ .patches
+ .iter()
+ .map(|p| (p.rel_patch_path.clone(), p))
+ .collect();
+ self.patches
+ .iter()
+ .filter_map(|ours| match other_map.get(&ours.rel_patch_path) {
+ Some(theirs) => {
+ if ours.get_from_version() != theirs.get_from_version()
+ || ours.get_until_version() != theirs.get_until_version()
+ {
+ Some((ours.rel_patch_path.clone(), ours.version_range))
+ } else {
+ None
+ }
+ }
+ _ => None,
+ })
+ .collect()
+ }
+
+ /// Given a vector of tuples with (rel_patch_path, Option<VersionRange>), replace
+ /// all version ranges in this collection with a matching one in the new_versions parameter.
+ pub fn update_version_ranges(&self, new_versions: &[(String, Option<VersionRange>)]) -> Self {
+ // new_versions should be really tiny (len() <= 2 for the most part), so
+ // the overhead of building an O(1) lookup structure is not worth it.
+ let get_updated_version = |rel_patch_path: &str| -> Option<Option<VersionRange>> {
+ // The first Option indicates whether we are updating it at all.
+ // The second Option indicates we can update it with None.
+ new_versions
+ .iter()
+ .find(|i| i.0 == rel_patch_path)
+ .map(|x| x.1)
+ };
+ let cloned_patches = self
+ .patches
+ .iter()
+ .map(|p| match get_updated_version(&p.rel_patch_path) {
+ Some(version_range) => PatchDictSchema {
+ version_range,
+ ..p.clone()
+ },
+ _ => p.clone(),
+ })
+ .collect();
+ Self {
+ workdir: self.workdir.clone(),
+ patches: cloned_patches,
+ }
+ }
+
fn union_helper(
&self,
other: &Self,
@@ -162,8 +210,6 @@ impl PatchCollection {
// ii.
combined_patches.push(PatchDictSchema {
rel_patch_path: p.rel_patch_path.clone(),
- start_version: p.start_version,
- end_version: p.end_version,
platforms: new_platforms,
metadata: p.metadata.clone(),
version_range: p.version_range,
@@ -264,25 +310,38 @@ impl std::fmt::Display for PatchCollection {
}
}
+/// Represents information which changed between now and an old version of a PATCHES.json file.
+pub struct PatchTemporalDiff {
+ pub cur_collection: PatchCollection,
+ pub new_patches: PatchCollection,
+ // Store version_updates as a vec, not a map, as it's likely to be very small (<=2),
+ // and the overhead of using an O(1) lookup structure isn't worth it.
+ pub version_updates: Vec<(String, Option<VersionRange>)>,
+}
+
/// Generate a PatchCollection incorporating only the diff between current patches and old patch
/// contents.
pub fn new_patches(
patches_path: &Path,
old_patch_contents: &str,
platform: &str,
-) -> Result<(PatchCollection, PatchCollection)> {
+) -> Result<PatchTemporalDiff> {
+ // Set up the current patch collection.
let cur_collection = PatchCollection::parse_from_file(patches_path)
.with_context(|| format!("parsing {} PATCHES.json", platform))?;
let cur_collection = filter_patches_by_platform(&cur_collection, platform);
let cur_collection = cur_collection.filter_patches(|p| cur_collection.patch_exists(p));
- let new_patches: PatchCollection = {
- let old_collection = PatchCollection::parse_from_str(
- patches_path.parent().unwrap().to_path_buf(),
- old_patch_contents,
- )?;
- let old_collection = old_collection.filter_patches(|p| old_collection.patch_exists(p));
- cur_collection.subtract(&old_collection)?
- };
+
+ // Set up the old patch collection.
+ let old_collection = PatchCollection::parse_from_str(
+ patches_path.parent().unwrap().to_path_buf(),
+ old_patch_contents,
+ )?;
+ let old_collection = old_collection.filter_patches(|p| old_collection.patch_exists(p));
+
+ // Set up the differential values
+ let version_updates = cur_collection.version_range_diffs(&old_collection);
+ let new_patches: PatchCollection = cur_collection.subtract(&old_collection)?;
let new_patches = new_patches.map_patches(|p| {
let mut platforms = BTreeSet::new();
platforms.extend(["android".to_string(), "chromiumos".to_string()]);
@@ -291,7 +350,11 @@ pub fn new_patches(
..p.to_owned()
}
});
- Ok((cur_collection, new_patches))
+ Ok(PatchTemporalDiff {
+ cur_collection,
+ new_patches,
+ version_updates,
+ })
}
/// Create a new collection with only the patches that apply to the
@@ -383,8 +446,6 @@ mod test {
#[test]
fn test_union() {
let patch1 = PatchDictSchema {
- start_version: Some(0),
- end_version: Some(1),
rel_patch_path: "a".into(),
metadata: None,
platforms: BTreeSet::from(["x".into()]),
@@ -431,8 +492,6 @@ mod test {
#[test]
fn test_union_empties() {
let patch1 = PatchDictSchema {
- start_version: Some(0),
- end_version: Some(1),
rel_patch_path: "a".into(),
metadata: None,
platforms: Default::default(),
@@ -459,4 +518,74 @@ mod test {
assert_eq!(union.patches.len(), 1);
assert_eq!(union.patches[0].platforms.len(), 0);
}
+
+ #[test]
+ fn test_version_differentials() {
+ let fixture = version_range_fixture();
+ let diff = fixture[0].version_range_diffs(&fixture[1]);
+ assert_eq!(diff.len(), 1);
+ assert_eq!(
+ &diff,
+ &[(
+ "a".to_string(),
+ Some(VersionRange {
+ from: Some(0),
+ until: Some(1)
+ })
+ )]
+ );
+ let diff = fixture[1].version_range_diffs(&fixture[2]);
+ assert_eq!(diff.len(), 0);
+ }
+
+ #[test]
+ fn test_version_updates() {
+ let fixture = version_range_fixture();
+ let collection = fixture[0].update_version_ranges(&[("a".into(), None)]);
+ assert_eq!(collection.patches[0].version_range, None);
+ assert_eq!(collection.patches[1], fixture[1].patches[1]);
+ let new_version_range = Some(VersionRange {
+ from: Some(42),
+ until: Some(43),
+ });
+ let collection = fixture[0].update_version_ranges(&[("a".into(), new_version_range)]);
+ assert_eq!(collection.patches[0].version_range, new_version_range);
+ assert_eq!(collection.patches[1], fixture[1].patches[1]);
+ }
+
+ fn version_range_fixture() -> Vec<PatchCollection> {
+ let patch1 = PatchDictSchema {
+ rel_patch_path: "a".into(),
+ metadata: None,
+ platforms: Default::default(),
+ version_range: Some(VersionRange {
+ from: Some(0),
+ until: Some(1),
+ }),
+ };
+ let patch1_updated = PatchDictSchema {
+ version_range: Some(VersionRange {
+ from: Some(0),
+ until: Some(3),
+ }),
+ ..patch1.clone()
+ };
+ let patch2 = PatchDictSchema {
+ rel_patch_path: "b".into(),
+ ..patch1.clone()
+ };
+ let collection1 = PatchCollection {
+ workdir: PathBuf::new(),
+ patches: vec![patch1, patch2.clone()],
+ };
+ let collection2 = PatchCollection {
+ workdir: PathBuf::new(),
+ patches: vec![patch1_updated, patch2.clone()],
+ };
+ let collection3 = PatchCollection {
+ workdir: PathBuf::new(),
+ patches: vec![patch2],
+ };
+ vec![collection1, collection2, collection3]
+ }
}
diff --git a/llvm_tools/patch_sync/src/version_control.rs b/llvm_tools/patch_sync/src/version_control.rs
index e07d39d6..fc6211ae 100644
--- a/llvm_tools/patch_sync/src/version_control.rs
+++ b/llvm_tools/patch_sync/src/version_control.rs
@@ -1,3 +1,7 @@
+// Copyright 2022 The ChromiumOS Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
use anyhow::{anyhow, bail, ensure, Context, Result};
use regex::Regex;
use std::ffi::OsStr;
@@ -8,11 +12,12 @@ use std::process::{Command, Output};
const CHROMIUMOS_OVERLAY_REL_PATH: &str = "src/third_party/chromiumos-overlay";
const ANDROID_LLVM_REL_PATH: &str = "toolchain/llvm_android";
-const CROS_MAIN_BRANCH: &str = "main";
-const ANDROID_MAIN_BRANCH: &str = "master"; // nocheck
+// Need to check out the upstream, rather than the local clone.
+const CROS_MAIN_BRANCH: &str = "cros/main";
+const ANDROID_MAIN_BRANCH: &str = "aosp/master"; // nocheck
const WORK_BRANCH_NAME: &str = "__patch_sync_tmp";
-/// Context struct to keep track of both Chromium OS and Android checkouts.
+/// Context struct to keep track of both ChromiumOS and Android checkouts.
#[derive(Debug)]
pub struct RepoSetupContext {
pub cros_checkout: PathBuf,
@@ -135,14 +140,14 @@ impl RepoSetupContext {
.join("patches/PATCHES.json")
}
- /// Get the Chromium OS path to the PATCHES.json file
+ /// Get the ChromiumOS path to the PATCHES.json file
pub fn cros_patches_path(&self) -> PathBuf {
self.cros_checkout
.join(&CHROMIUMOS_OVERLAY_REL_PATH)
.join("sys-devel/llvm/files/PATCHES.json")
}
- /// Return the contents of the old PATCHES.json from Chromium OS
+ /// Return the contents of the old PATCHES.json from ChromiumOS
pub fn old_cros_patch_contents(&self, hash: &str) -> Result<String> {
Self::old_file_contents(
hash,
diff --git a/llvm_tools/patch_utils.py b/llvm_tools/patch_utils.py
new file mode 100644
index 00000000..affb3d0d
--- /dev/null
+++ b/llvm_tools/patch_utils.py
@@ -0,0 +1,594 @@
+# Copyright 2022 The ChromiumOS Authors
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Provides patch utilities for PATCHES.json file handling."""
+
+import collections
+import contextlib
+import dataclasses
+import json
+from pathlib import Path
+import re
+import subprocess
+import sys
+from typing import Any, Dict, IO, Iterable, List, Optional, Tuple, Union
+
+
+CHECKED_FILE_RE = re.compile(r"^checking file\s+(.*)$")
+HUNK_FAILED_RE = re.compile(r"^Hunk #(\d+) FAILED at.*")
+HUNK_HEADER_RE = re.compile(r"^@@\s+-(\d+),(\d+)\s+\+(\d+),(\d+)\s+@@")
+HUNK_END_RE = re.compile(r"^--\s*$")
+PATCH_SUBFILE_HEADER_RE = re.compile(r"^\+\+\+ [ab]/(.*)$")
+
+
+@contextlib.contextmanager
+def atomic_write(fp: Union[Path, str], mode="w", *args, **kwargs):
+ """Write to a filepath atomically.
+
+ This works via a temp-file swap: contents are first written to a file
+ with a .tmp suffix in the same directory, which is then renamed to the
+ desired filepath.
+
+ Args:
+ fp: Filepath to open.
+ mode: File mode; can be 'w', 'wb'. Default 'w'.
+ *args: Passed to Path.open as positional args.
+ **kwargs: Passed to Path.open as kwargs.
+
+ Raises:
+ ValueError when the mode is invalid.
+ """
+ if isinstance(fp, str):
+ fp = Path(fp)
+ if mode not in ("w", "wb"):
+ raise ValueError(f"mode {mode} not accepted")
+ temp_fp = fp.with_suffix(fp.suffix + ".tmp")
+ try:
+ with temp_fp.open(mode, *args, **kwargs) as f:
+ yield f
+ except:
+ if temp_fp.is_file():
+ temp_fp.unlink()
+ raise
+ temp_fp.rename(fp)
+
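+# A minimal usage sketch for atomic_write (illustrative only; the path and
+# contents here are hypothetical):
+#
+#   with atomic_write("example.json", encoding="utf-8") as f:
+#       f.write('{"hello": "world"}\n')
+#
+# If the body raises, the .tmp file is deleted and the original file at the
+# destination path is left untouched.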
+
+@dataclasses.dataclass
+class Hunk:
+ """Represents a patch Hunk."""
+
+ hunk_id: int
+ """Hunk ID for the current file."""
+ orig_start: int
+ orig_hunk_len: int
+ patch_start: int
+ patch_hunk_len: int
+ patch_hunk_lineno_begin: int
+ patch_hunk_lineno_end: Optional[int]
+
+
+def parse_patch_stream(patch_stream: IO[str]) -> Dict[str, List[Hunk]]:
+ """Parse a patch file-like into Hunks.
+
+ Args:
+ patch_stream: An IO stream formatted like a git patch file.
+
+ Returns:
+ A dictionary mapping filenames to lists of Hunks present
+ in the patch stream.
+ """
+
+ current_filepath = None
+ current_hunk_id = 0
+ current_hunk = None
+ out = collections.defaultdict(list)
+ for lineno, line in enumerate(patch_stream.readlines()):
+ subfile_header = PATCH_SUBFILE_HEADER_RE.match(line)
+ if subfile_header:
+ current_filepath = subfile_header.group(1)
+ if not current_filepath:
+ raise RuntimeError("Could not get file header in patch stream")
+ # Need to reset the hunk id, as it's per-file.
+ current_hunk_id = 0
+ continue
+ hunk_header = HUNK_HEADER_RE.match(line)
+ if hunk_header:
+ if not current_filepath:
+ raise RuntimeError(
+ "Parsed hunk before file header in patch stream"
+ )
+ if current_hunk:
+ # Already parsing a hunk
+ current_hunk.patch_hunk_lineno_end = lineno
+ current_hunk_id += 1
+ current_hunk = Hunk(
+ hunk_id=current_hunk_id,
+ orig_start=int(hunk_header.group(1)),
+ orig_hunk_len=int(hunk_header.group(2)),
+ patch_start=int(hunk_header.group(3)),
+ patch_hunk_len=int(hunk_header.group(4)),
+ patch_hunk_lineno_begin=lineno + 1,
+ patch_hunk_lineno_end=None,
+ )
+ out[current_filepath].append(current_hunk)
+ continue
+ if current_hunk and HUNK_END_RE.match(line):
+ current_hunk.patch_hunk_lineno_end = lineno
+ return out
+
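+# Illustrative sketch of parse_patch_stream on a tiny patch (hypothetical
+# contents, using io from the standard library):
+#
+#   import io
+#   hunks = parse_patch_stream(
+#       io.StringIO("+++ b/foo.c\n@@ -1,2 +3,4 @@\n")
+#   )
+#   assert hunks["foo.c"][0].orig_start == 1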
+
+def parse_failed_patch_output(text: str) -> Dict[str, List[int]]:
+ current_file = None
+ failed_hunks = collections.defaultdict(list)
+ for eline in text.split("\n"):
+ checked_file_match = CHECKED_FILE_RE.match(eline)
+ if checked_file_match:
+ current_file = checked_file_match.group(1)
+ continue
+ failed_match = HUNK_FAILED_RE.match(eline)
+ if failed_match:
+ if not current_file:
+ raise ValueError("Input stream was not parsable")
+ hunk_id = int(failed_match.group(1))
+ failed_hunks[current_file].append(hunk_id)
+ return failed_hunks
+
+
+@dataclasses.dataclass(frozen=True)
+class PatchResult:
+ """Result of a patch application."""
+
+ succeeded: bool
+ failed_hunks: Dict[str, List[Hunk]] = dataclasses.field(
+ default_factory=dict
+ )
+
+ def __bool__(self):
+ return self.succeeded
+
+ def failure_info(self) -> str:
+ if self.succeeded:
+ return ""
+ s = ""
+ for file, hunks in self.failed_hunks.items():
+ s += f"{file}:\n"
+ for h in hunks:
+ s += f"Lines {h.orig_start} to {h.orig_start + h.orig_hunk_len}\n"
+ s += "--------------------\n"
+ return s
+
+
+@dataclasses.dataclass
+class PatchEntry:
+ """Object mapping of an entry of PATCHES.json."""
+
+ workdir: Path
+ """Storage location for the patches."""
+ metadata: Optional[Dict[str, Any]]
+ platforms: Optional[List[str]]
+ rel_patch_path: str
+ version_range: Optional[Dict[str, Optional[int]]]
+ _parsed_hunks = None
+
+ def __post_init__(self):
+ if not self.workdir.is_dir():
+ raise ValueError(f"workdir {self.workdir} is not a directory")
+
+ @classmethod
+ def from_dict(cls, workdir: Path, data: Dict[str, Any]):
+ """Instatiate from a dictionary.
+
+ Dictionary must have at least the following key:
+
+ {
+ 'rel_patch_path': '<relative patch path to workdir>',
+ }
+
+ Returns:
+ A new PatchEntry.
+ """
+ return cls(
+ workdir,
+ data.get("metadata"),
+ data.get("platforms"),
+ data["rel_patch_path"],
+ data.get("version_range"),
+ )
+
+ def to_dict(self) -> Dict[str, Any]:
+ out: Dict[str, Any] = {
+ "metadata": self.metadata,
+ }
+ if self.platforms:
+ # To match patch_sync, only serialized when
+ # non-empty and non-null.
+ out["platforms"] = sorted(self.platforms)
+ out.update(
+ {
+ "rel_patch_path": self.rel_patch_path,
+ "version_range": self.version_range,
+ }
+ )
+ return out
+
+ def parsed_hunks(self) -> Dict[str, List[Hunk]]:
+ # Minor caching here because IO is slow.
+ if not self._parsed_hunks:
+ with self.patch_path().open(encoding="utf-8") as f:
+ self._parsed_hunks = parse_patch_stream(f)
+ return self._parsed_hunks
+
+ def patch_path(self) -> Path:
+ return self.workdir / self.rel_patch_path
+
+ def can_patch_version(self, svn_version: int) -> bool:
+ """Is this patch meant to apply to `svn_version`?"""
+ # Sometimes the key is there, but it's set to None.
+ if not self.version_range:
+ return True
+ from_v = self.version_range.get("from") or 0
+ until_v = self.version_range.get("until")
+ if until_v is None:
+ until_v = sys.maxsize
+ return from_v <= svn_version < until_v
+
+ def is_old(self, svn_version: int) -> bool:
+ """Is this patch old compared to `svn_version`?"""
+ if not self.version_range:
+ return False
+ until_v = self.version_range.get("until")
+ # Sometimes the key is there, but it's set to None.
+ if until_v is None:
+ until_v = sys.maxsize
+ return svn_version >= until_v
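+
+ # Illustrative example of the range semantics above: with
+ # version_range={"from": 4, "until": 9}, can_patch_version(4) and
+ # can_patch_version(8) are True, can_patch_version(9) is False, and
+ # is_old(9) is True, since "until" is exclusive.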
+
+ def apply(
+ self, root_dir: Path, extra_args: Optional[List[str]] = None
+ ) -> PatchResult:
+ """Apply a patch to a given directory."""
+ if not extra_args:
+ extra_args = []
+ # Cmd to apply a patch in the src unpack path.
+ abs_patch_path = self.patch_path().absolute()
+ if not abs_patch_path.is_file():
+ raise RuntimeError(
+ f"Cannot apply: patch {abs_patch_path} is not a file"
+ )
+ cmd = [
+ "patch",
+ "-d",
+ root_dir.absolute(),
+ "-f",
+ "-p1",
+ "--no-backup-if-mismatch",
+ "-i",
+ abs_patch_path,
+ ] + extra_args
+ try:
+ subprocess.run(
+ cmd, encoding="utf-8", check=True, stdout=subprocess.PIPE
+ )
+ except subprocess.CalledProcessError as e:
+ parsed_hunks = self.parsed_hunks()
+ failed_hunks_id_dict = parse_failed_patch_output(e.stdout)
+ failed_hunks = {}
+ for path, failed_hunk_ids in failed_hunks_id_dict.items():
+ hunks_for_file = parsed_hunks[path]
+ failed_hunks[path] = [
+ hunk
+ for hunk in hunks_for_file
+ if hunk.hunk_id in failed_hunk_ids
+ ]
+ return PatchResult(succeeded=False, failed_hunks=failed_hunks)
+ return PatchResult(succeeded=True)
+
+ def test_apply(self, root_dir: Path) -> PatchResult:
+ """Dry run applying a patch to a given directory."""
+ return self.apply(root_dir, ["--dry-run"])
+
+ def title(self) -> str:
+ if not self.metadata:
+ return ""
+ return self.metadata.get("title", "")
+
+
+@dataclasses.dataclass(frozen=True)
+class PatchInfo:
+ """Holds info for a round of patch applications."""
+
+ # str types are legacy. Patch lists should
+ # probably be PatchEntries.
+ applied_patches: List[PatchEntry]
+ failed_patches: List[PatchEntry]
+ # Can be deleted once legacy code is removed.
+ non_applicable_patches: List[str]
+ # Can be deleted once legacy code is removed.
+ disabled_patches: List[str]
+ # Can be deleted once legacy code is removed.
+ removed_patches: List[str]
+ # Can be deleted once legacy code is removed.
+ modified_metadata: Optional[str]
+
+ def _asdict(self):
+ return dataclasses.asdict(self)
+
+
+def json_to_patch_entries(workdir: Path, json_fd: IO[str]) -> List[PatchEntry]:
+ """Convert a json IO object to List[PatchEntry].
+
+ Examples:
+ >>> f = open('PATCHES.json')
+ >>> patch_entries = json_to_patch_entries(Path(), f)
+ """
+ return [PatchEntry.from_dict(workdir, d) for d in json.load(json_fd)]
+
+
+def _print_failed_patch(pe: PatchEntry, failed_hunks: Dict[str, List[Hunk]]):
+ """Print information about a single failing PatchEntry.
+
+ Args:
+ pe: A PatchEntry that failed.
+ failed_hunks: Hunks for pe which failed as dict:
+ filepath: [Hunk...]
+ """
+ print(f"Could not apply {pe.rel_patch_path}: {pe.title()}", file=sys.stderr)
+ for fp, hunks in failed_hunks.items():
+ print(f"{fp}:", file=sys.stderr)
+ for h in hunks:
+ print(
+ f"- {pe.rel_patch_path} "
+ f"l:{h.patch_hunk_lineno_begin}...{h.patch_hunk_lineno_end}",
+ file=sys.stderr,
+ )
+
+
+def apply_all_from_json(
+ svn_version: int,
+ llvm_src_dir: Path,
+ patches_json_fp: Path,
+ continue_on_failure: bool = False,
+) -> PatchInfo:
+ """Attempt to apply some patches to a given LLVM source tree.
+
+ This relies on a PATCHES.json file to be the primary way
+ the patches are applied.
+
+ Args:
+ svn_version: LLVM Subversion revision to patch.
+ llvm_src_dir: llvm-project root-level source directory to patch.
+ patches_json_fp: Filepath to the PATCHES.json file.
+ continue_on_failure: Skip any patches which failed to apply,
+ rather than throw an Exception.
+ """
+ with patches_json_fp.open(encoding="utf-8") as f:
+ patches = json_to_patch_entries(patches_json_fp.parent, f)
+ skipped_patches = []
+ failed_patches = []
+ applied_patches = []
+ for pe in patches:
+ applied, failed_hunks = apply_single_patch_entry(
+ svn_version, llvm_src_dir, pe
+ )
+ if applied:
+ applied_patches.append(pe)
+ continue
+ if failed_hunks is not None:
+ if continue_on_failure:
+ failed_patches.append(pe)
+ continue
+ else:
+ _print_failed_patch(pe, failed_hunks)
+ raise RuntimeError(
+ "failed to apply patch " f"{pe.patch_path()}: {pe.title()}"
+ )
+ # Didn't apply, didn't fail, it was skipped.
+ skipped_patches.append(pe)
+ return PatchInfo(
+ non_applicable_patches=skipped_patches,
+ applied_patches=applied_patches,
+ failed_patches=failed_patches,
+ disabled_patches=[],
+ removed_patches=[],
+ modified_metadata=None,
+ )
+
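+# A usage sketch for apply_all_from_json (illustrative; the paths and
+# revision number are hypothetical):
+#
+#   info = apply_all_from_json(
+#       svn_version=500000,
+#       llvm_src_dir=Path("/path/to/llvm-project"),
+#       patches_json_fp=Path("/path/to/PATCHES.json"),
+#       continue_on_failure=True,
+#   )
+#   print([p.rel_patch_path for p in info.failed_patches])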
+
+def apply_single_patch_entry(
+ svn_version: int,
+ llvm_src_dir: Path,
+ pe: PatchEntry,
+ ignore_version_range: bool = False,
+) -> Tuple[bool, Optional[Dict[str, List[Hunk]]]]:
+ """Try to apply a single PatchEntry object.
+
+ Returns:
+ Tuple where the first element indicates whether the patch applied,
+ and the second element is a failed-hunk mapping from file names to
+ lists of hunks (if the patch didn't apply).
+ """
+ # Don't apply patches outside of the version range.
+ if not ignore_version_range and not pe.can_patch_version(svn_version):
+ return False, None
+ # Test first to avoid making changes.
+ test_application = pe.test_apply(llvm_src_dir)
+ if not test_application:
+ return False, test_application.failed_hunks
+ # Now actually make changes.
+ application_result = pe.apply(llvm_src_dir)
+ if not application_result:
+ # This should be very rare/impossible.
+ return False, application_result.failed_hunks
+ return True, None
+
+
+def is_git_dirty(git_root_dir: Path) -> bool:
+ """Return whether the given git directory has uncommitted changes."""
+ if not git_root_dir.is_dir():
+ raise ValueError(f"git_root_dir {git_root_dir} is not a directory")
+ cmd = ["git", "ls-files", "-m", "--other", "--exclude-standard"]
+ return (
+ subprocess.run(
+ cmd,
+ stdout=subprocess.PIPE,
+ check=True,
+ cwd=git_root_dir,
+ encoding="utf-8",
+ ).stdout
+ != ""
+ )
+
+
+def clean_src_tree(src_path):
+ """Cleans the source tree of the changes made in 'src_path'."""
+
+ reset_src_tree_cmd = ["git", "-C", src_path, "reset", "HEAD", "--hard"]
+
+ subprocess.run(reset_src_tree_cmd, check=True)
+
+ clean_src_tree_cmd = ["git", "-C", src_path, "clean", "-fd"]
+
+ subprocess.run(clean_src_tree_cmd, check=True)
+
+
+@contextlib.contextmanager
+def git_clean_context(git_root_dir: Path):
+ """Cleans up a git directory when the context exits."""
+ if is_git_dirty(git_root_dir):
+ raise RuntimeError("Cannot setup clean context; git_root_dir is dirty")
+ try:
+ yield
+ finally:
+ clean_src_tree(git_root_dir)
+
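+# A usage sketch for git_clean_context (illustrative; the path is
+# hypothetical):
+#
+#   with git_clean_context(Path("/path/to/llvm-project")):
+#       ...  # try out patches; the tree is reset and cleaned on exit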
+
+def _write_json_changes(patches: List[Dict[str, Any]], file_io: IO[str]):
+ """Write JSON changes to file, does not acquire new file lock."""
+ json.dump(patches, file_io, indent=4, separators=(",", ": "))
+ # Need to add a newline as json.dump omits it.
+ file_io.write("\n")
+
+
+def update_version_ranges(
+ svn_version: int, llvm_src_dir: Path, patches_json_fp: Path
+) -> PatchInfo:
+ """Reduce the version ranges of failing patches.
+
+ Patches which fail to apply will have their 'version_range.until'
+ field reduced to the passed in svn_version.
+
+ Modifies the contents of patches_json_fp.
+
+ Args:
+ svn_version: LLVM revision number.
+ llvm_src_dir: llvm-project directory path.
+ patches_json_fp: Filepath to the PATCHES.json file.
+
+ Returns:
+ PatchInfo for applied and disabled patches.
+ """
+ with patches_json_fp.open(encoding="utf-8") as f:
+ patch_entries = json_to_patch_entries(
+ patches_json_fp.parent,
+ f,
+ )
+ modified_entries, applied_patches = update_version_ranges_with_entries(
+ svn_version, llvm_src_dir, patch_entries
+ )
+ with atomic_write(patches_json_fp, encoding="utf-8") as f:
+ _write_json_changes([p.to_dict() for p in patch_entries], f)
+ for entry in modified_entries:
+ print(
+ f"Stopped applying {entry.rel_patch_path} ({entry.title()}) "
+ f"for r{svn_version}"
+ )
+ return PatchInfo(
+ non_applicable_patches=[],
+ applied_patches=applied_patches,
+ failed_patches=[],
+ disabled_patches=[p.rel_patch_path for p in modified_entries],
+ removed_patches=[],
+ modified_metadata=str(patches_json_fp) if modified_entries else None,
+ )
+
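+# Illustrative example of the rewrite performed above: a patch entry with
+# version_range={"from": 0, "until": None} that fails to apply at r1234 is
+# written back to PATCHES.json with version_range={"from": 0, "until": 1234}.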
+
+def update_version_ranges_with_entries(
+ svn_version: int,
+ llvm_src_dir: Path,
+ patch_entries: Iterable[PatchEntry],
+) -> Tuple[List[PatchEntry], List[PatchEntry]]:
+ """Test-able helper for UpdateVersionRanges.
+
+ Args:
+ svn_version: LLVM revision number.
+ llvm_src_dir: llvm-project directory path.
+ patch_entries: PatchEntry objects to modify.
+
+ Returns:
+ Tuple of (modified entries, applied patches)
+
+ Post:
+ Modifies patch_entries in place.
+ """
+ modified_entries: List[PatchEntry] = []
+ applied_patches: List[PatchEntry] = []
+ active_patches = (pe for pe in patch_entries if not pe.is_old(svn_version))
+ with git_clean_context(llvm_src_dir):
+ for pe in active_patches:
+ test_result = pe.test_apply(llvm_src_dir)
+ if not test_result:
+ if pe.version_range is None:
+ pe.version_range = {}
+ pe.version_range["until"] = svn_version
+ modified_entries.append(pe)
+ else:
+ # We have to actually apply the patch so that future patches
+ # will stack properly.
+ if not pe.apply(llvm_src_dir).succeeded:
+ raise RuntimeError(
+ "Could not apply patch that dry ran successfully"
+ )
+ applied_patches.append(pe)
+
+ return modified_entries, applied_patches
+
+
+def remove_old_patches(
+ svn_version: int, llvm_src_dir: Path, patches_json_fp: Path
+) -> PatchInfo:
+ """Remove patches that don't and will never apply for the future.
+
+ Patches are determined to be "old" via the "is_old" method for
+ each patch entry.
+
+ Args:
+ svn_version: LLVM SVN version.
+ llvm_src_dir: LLVM source directory.
+ patches_json_fp: Location to edit patches on.
+
+ Returns:
+ PatchInfo for modified patches.
+ """
+ with patches_json_fp.open(encoding="utf-8") as f:
+ patches_list = json.load(f)
+ patch_entries = (
+ PatchEntry.from_dict(llvm_src_dir, elem) for elem in patches_list
+ )
+ oldness = [(entry, entry.is_old(svn_version)) for entry in patch_entries]
+ filtered_entries = [entry.to_dict() for entry, old in oldness if not old]
+ with atomic_write(patches_json_fp, encoding="utf-8") as f:
+ _write_json_changes(filtered_entries, f)
+ removed_entries = [entry for entry, old in oldness if old]
+ plural_patches = "patch" if len(removed_entries) == 1 else "patches"
+ print(f"Removed {len(removed_entries)} old {plural_patches}:")
+ for r in removed_entries:
+ print(f"- {r.rel_patch_path}: {r.title()}")
+
+ return PatchInfo(
+ non_applicable_patches=[],
+ applied_patches=[],
+ failed_patches=[],
+ disabled_patches=[],
+ removed_patches=[p.rel_patch_path for p in removed_entries],
+ modified_metadata=str(patches_json_fp) if removed_entries else None,
+ )
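+
+
+# A usage sketch for remove_old_patches (illustrative; the paths and
+# revision number are hypothetical):
+#
+#   remove_old_patches(
+#       svn_version=500000,
+#       llvm_src_dir=Path("/path/to/llvm-project"),
+#       patches_json_fp=Path("/path/to/PATCHES.json"),
+#   )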
diff --git a/llvm_tools/patch_utils_unittest.py b/llvm_tools/patch_utils_unittest.py
new file mode 100755
index 00000000..b8c21390
--- /dev/null
+++ b/llvm_tools/patch_utils_unittest.py
@@ -0,0 +1,381 @@
+#!/usr/bin/env python3
+# Copyright 2022 The ChromiumOS Authors
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Unit tests for the patch_utils.py file."""
+
+import io
+import json
+from pathlib import Path
+import subprocess
+import tempfile
+from typing import Callable
+import unittest
+from unittest import mock
+
+import patch_utils as pu
+
+
+class TestPatchUtils(unittest.TestCase):
+ """Test the patch_utils."""
+
+ def test_atomic_write(self):
+ """Test that atomic write safely writes."""
+ prior_contents = "This is a test written by patch_utils_unittest.py\n"
+ new_contents = "I am a test written by patch_utils_unittest.py\n"
+ with tempfile.TemporaryDirectory(
+ prefix="patch_utils_unittest"
+ ) as dirname:
+ dirpath = Path(dirname)
+ filepath = dirpath / "test_atomic_write.txt"
+ with filepath.open("w", encoding="utf-8") as f:
+ f.write(prior_contents)
+
+ def _t():
+ with pu.atomic_write(filepath, encoding="utf-8") as f:
+ f.write(new_contents)
+ raise Exception("Expected failure")
+
+ self.assertRaises(Exception, _t)
+ with filepath.open(encoding="utf-8") as f:
+ lines = f.readlines()
+ self.assertEqual(lines[0], prior_contents)
+ with pu.atomic_write(filepath, encoding="utf-8") as f:
+ f.write(new_contents)
+ with filepath.open(encoding="utf-8") as f:
+ lines = f.readlines()
+ self.assertEqual(lines[0], new_contents)
+
+ def test_from_to_dict(self):
+ """Test to and from dict conversion."""
+ d = TestPatchUtils._default_json_dict()
+ d["metadata"] = {
+ "title": "hello world",
+ "info": [],
+ "other_extra_info": {
+ "extra_flags": [],
+ },
+ }
+ e = pu.PatchEntry.from_dict(TestPatchUtils._mock_dir(), d)
+ self.assertEqual(d, e.to_dict())
+
+ def test_patch_path(self):
+ """Test that we can get the full path from a PatchEntry."""
+ d = TestPatchUtils._default_json_dict()
+ with mock.patch.object(Path, "is_dir", return_value=True):
+ entry = pu.PatchEntry.from_dict(Path("/home/dir"), d)
+ self.assertEqual(
+ entry.patch_path(), Path("/home/dir") / d["rel_patch_path"]
+ )
+
+ def test_can_patch_version(self):
+ """Test that patch application based on version is correct."""
+ base_dict = TestPatchUtils._default_json_dict()
+ workdir = TestPatchUtils._mock_dir()
+ e1 = pu.PatchEntry.from_dict(workdir, base_dict)
+ self.assertFalse(e1.can_patch_version(3))
+ self.assertTrue(e1.can_patch_version(4))
+ self.assertTrue(e1.can_patch_version(5))
+ self.assertFalse(e1.can_patch_version(9))
+ base_dict["version_range"] = {"until": 9}
+ e2 = pu.PatchEntry.from_dict(workdir, base_dict)
+ self.assertTrue(e2.can_patch_version(0))
+ self.assertTrue(e2.can_patch_version(5))
+ self.assertFalse(e2.can_patch_version(9))
+ base_dict["version_range"] = {"from": 4}
+ e3 = pu.PatchEntry.from_dict(workdir, base_dict)
+ self.assertFalse(e3.can_patch_version(3))
+ self.assertTrue(e3.can_patch_version(5))
+ self.assertTrue(e3.can_patch_version(1 << 31))
+ base_dict["version_range"] = {"from": 4, "until": None}
+ e4 = pu.PatchEntry.from_dict(workdir, base_dict)
+ self.assertFalse(e4.can_patch_version(3))
+ self.assertTrue(e4.can_patch_version(5))
+ self.assertTrue(e4.can_patch_version(1 << 31))
+ base_dict["version_range"] = {"from": None, "until": 9}
+ e5 = pu.PatchEntry.from_dict(workdir, base_dict)
+ self.assertTrue(e5.can_patch_version(0))
+ self.assertTrue(e5.can_patch_version(5))
+ self.assertFalse(e5.can_patch_version(9))
+
+ def test_can_parse_from_json(self):
+ """Test that patches be loaded from json."""
+ patches_json = """
+[
+ {
+ "metadata": {},
+ "platforms": [],
+ "rel_patch_path": "cherry/nowhere.patch",
+ "version_range": {}
+ },
+ {
+ "metadata": {},
+ "rel_patch_path": "cherry/somewhere.patch",
+ "version_range": {}
+ },
+ {
+ "rel_patch_path": "where.patch",
+ "version_range": null
+ },
+ {
+ "rel_patch_path": "cherry/anywhere.patch"
+ }
+]
+ """
+ result = pu.json_to_patch_entries(Path(), io.StringIO(patches_json))
+ self.assertEqual(len(result), 4)
+
+ def test_parsed_hunks(self):
+ """Test that we can parse patch file hunks."""
+ m = mock.mock_open(read_data=_EXAMPLE_PATCH)
+
+ def mocked_open(self, *args, **kwargs):
+ return m(self, *args, **kwargs)
+
+ with mock.patch.object(Path, "open", mocked_open):
+ e = pu.PatchEntry.from_dict(
+ TestPatchUtils._mock_dir(), TestPatchUtils._default_json_dict()
+ )
+ hunk_dict = e.parsed_hunks()
+
+ m.assert_called()
+ filename1 = "clang/lib/Driver/ToolChains/Clang.cpp"
+ filename2 = "llvm/lib/Passes/PassBuilder.cpp"
+ self.assertEqual(set(hunk_dict.keys()), {filename1, filename2})
+ hunk_list1 = hunk_dict[filename1]
+ hunk_list2 = hunk_dict[filename2]
+ self.assertEqual(len(hunk_list1), 1)
+ self.assertEqual(len(hunk_list2), 2)
+
+ def test_apply_when_patch_nonexistent(self):
+ """Test that we error out when we try to apply a non-existent patch."""
+ src_dir = TestPatchUtils._mock_dir("somewhere/llvm-project")
+ patch_dir = TestPatchUtils._mock_dir()
+ e = pu.PatchEntry.from_dict(
+ patch_dir, TestPatchUtils._default_json_dict()
+ )
+ with mock.patch("subprocess.run", mock.MagicMock()):
+ self.assertRaises(RuntimeError, lambda: e.apply(src_dir))
+
+ def test_apply_success(self):
+ """Test that we can call apply."""
+ src_dir = TestPatchUtils._mock_dir("somewhere/llvm-project")
+ patch_dir = TestPatchUtils._mock_dir()
+ e = pu.PatchEntry.from_dict(
+ patch_dir, TestPatchUtils._default_json_dict()
+ )
+ with mock.patch("pathlib.Path.is_file", return_value=True):
+ with mock.patch("subprocess.run", mock.MagicMock()):
+ result = e.apply(src_dir)
+ self.assertTrue(result.succeeded)
+
+ def test_parse_failed_patch_output(self):
+ """Test that we can call parse `patch` output."""
+ fixture = """
+checking file a/b/c.cpp
+Hunk #1 SUCCEEDED at 96 with fuzz 1.
+Hunk #12 FAILED at 77.
+Hunk #42 FAILED at 1979.
+checking file x/y/z.h
+Hunk #4 FAILED at 30.
+checking file works.cpp
+Hunk #1 SUCCEEDED at 96 with fuzz 1.
+"""
+ result = pu.parse_failed_patch_output(fixture)
+ self.assertEqual(result["a/b/c.cpp"], [12, 42])
+ self.assertEqual(result["x/y/z.h"], [4])
+ self.assertNotIn("works.cpp", result)
+
+ def test_is_git_dirty(self):
+ """Test if a git directory has uncommitted changes."""
+ with tempfile.TemporaryDirectory(
+ prefix="patch_utils_unittest"
+ ) as dirname:
+ dirpath = Path(dirname)
+
+ def _run_h(cmd):
+ subprocess.run(
+ cmd,
+ cwd=dirpath,
+ stdout=subprocess.DEVNULL,
+ stderr=subprocess.DEVNULL,
+ check=True,
+ )
+
+ _run_h(["git", "init"])
+ self.assertFalse(pu.is_git_dirty(dirpath))
+ test_file = dirpath / "test_file"
+ test_file.touch()
+ self.assertTrue(pu.is_git_dirty(dirpath))
+ _run_h(["git", "add", "."])
+ _run_h(["git", "commit", "-m", "test"])
+ self.assertFalse(pu.is_git_dirty(dirpath))
+ test_file.touch()
+ self.assertFalse(pu.is_git_dirty(dirpath))
+ with test_file.open("w", encoding="utf-8"):
+ test_file.write_text("abc")
+ self.assertTrue(pu.is_git_dirty(dirpath))
+
+ @mock.patch("patch_utils.git_clean_context", mock.MagicMock)
+ def test_update_version_ranges(self):
+ """Test the UpdateVersionRanges function."""
+ with tempfile.TemporaryDirectory(
+ prefix="patch_manager_unittest"
+ ) as dirname:
+ dirpath = Path(dirname)
+ patches = [
+ pu.PatchEntry(
+ workdir=dirpath,
+ rel_patch_path="x.patch",
+ metadata=None,
+ platforms=None,
+ version_range={
+ "from": 0,
+ "until": 2,
+ },
+ ),
+ pu.PatchEntry(
+ workdir=dirpath,
+ rel_patch_path="y.patch",
+ metadata=None,
+ platforms=None,
+ version_range={
+ "from": 0,
+ "until": 2,
+ },
+ ),
+ ]
+ patches[0].apply = mock.MagicMock(
+ return_value=pu.PatchResult(
+ succeeded=False, failed_hunks={"a/b/c": []}
+ )
+ )
+ patches[1].apply = mock.MagicMock(
+ return_value=pu.PatchResult(succeeded=True)
+ )
+ results, _ = pu.update_version_ranges_with_entries(
+ 1, dirpath, patches
+ )
+ # We should only have updated the version_range of the first patch,
+ # as that one failed to apply.
+ self.assertEqual(len(results), 1)
+ self.assertEqual(results[0].version_range, {"from": 0, "until": 1})
+ self.assertEqual(patches[0].version_range, {"from": 0, "until": 1})
+ self.assertEqual(patches[1].version_range, {"from": 0, "until": 2})
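+ # In other words: applying at SVN version 1 narrows a failing
+ # patch's range to end at that version ("until": 1), so the patch
+ # is skipped from that version onward.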
+
+ @mock.patch("builtins.print")
+ def test_remove_old_patches(self, _):
+ """Can remove old patches from PATCHES.json."""
+ one_patch_dict = {
+ "metadata": {
+ "title": "[some label] hello world",
+ },
+ "platforms": [
+ "chromiumos",
+ ],
+ "rel_patch_path": "x/y/z",
+ "version_range": {
+ "from": 4,
+ "until": 5,
+ },
+ }
+ patches = [
+ one_patch_dict,
+ {**one_patch_dict, "version_range": {"until": None}},
+ {**one_patch_dict, "version_range": {"from": 100}},
+ {**one_patch_dict, "version_range": {"until": 8}},
+ ]
+ cases = [
+ (0, lambda x: self.assertEqual(len(x), 4)),
+ (6, lambda x: self.assertEqual(len(x), 3)),
+ (8, lambda x: self.assertEqual(len(x), 2)),
+ (1000, lambda x: self.assertEqual(len(x), 2)),
+ ]
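+ # (Inferred from the cases above: a patch counts as "old" once its
+ # "until" bound is set and until <= svn_version; entries with
+ # "until": None, or only a "from" bound, are kept.)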
+
+ def _t(dirname: str, svn_version: int, assertion_f: Callable):
+ json_filepath = Path(dirname) / "PATCHES.json"
+ with json_filepath.open("w", encoding="utf-8") as f:
+ json.dump(patches, f)
+ pu.remove_old_patches(svn_version, Path(), json_filepath)
+ with json_filepath.open("r", encoding="utf-8") as f:
+ result = json.load(f)
+ assertion_f(result)
+
+ with tempfile.TemporaryDirectory(
+ prefix="patch_utils_unittest"
+ ) as dirname:
+ for r, a in cases:
+ _t(dirname, r, a)
+
+ @staticmethod
+ def _default_json_dict():
+ return {
+ "metadata": {
+ "title": "hello world",
+ },
+ "platforms": ["a"],
+ "rel_patch_path": "x/y/z",
+ "version_range": {
+ "from": 4,
+ "until": 9,
+ },
+ }
+
+ @staticmethod
+ def _mock_dir(path: str = "a/b/c"):
+ workdir = Path(path)
+ workdir = mock.MagicMock(workdir)
+ workdir.is_dir = lambda: True
+ workdir.joinpath = lambda x: Path(path).joinpath(x)
+ workdir.__truediv__ = lambda self, x: self.joinpath(x)
+ return workdir
+
+
+_EXAMPLE_PATCH = """
+diff --git a/clang/lib/Driver/ToolChains/Clang.cpp b/clang/lib/Driver/ToolChains/Clang.cpp
+index 5620a543438..099eb769ca5 100644
+--- a/clang/lib/Driver/ToolChains/Clang.cpp
++++ b/clang/lib/Driver/ToolChains/Clang.cpp
+@@ -3995,8 +3995,11 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
+ Args.hasArg(options::OPT_dA))
+ CmdArgs.push_back("-masm-verbose");
+
+- if (!TC.useIntegratedAs())
++ if (!TC.useIntegratedAs()) {
+ CmdArgs.push_back("-no-integrated-as");
++ CmdArgs.push_back("-mllvm");
++ CmdArgs.push_back("-enable-call-graph-profile-sort=false");
++ }
+
+ if (Args.hasArg(options::OPT_fdebug_pass_structure)) {
+ CmdArgs.push_back("-mdebug-pass");
+diff --git a/llvm/lib/Passes/PassBuilder.cpp b/llvm/lib/Passes/PassBuilder.cpp
+index c5fd68299eb..4c6e15eeeb9 100644
+--- a/llvm/lib/Passes/PassBuilder.cpp
++++ b/llvm/lib/Passes/PassBuilder.cpp
+@@ -212,6 +212,10 @@ static cl::opt<bool>
+ EnableCHR("enable-chr-npm", cl::init(true), cl::Hidden,
+ cl::desc("Enable control height reduction optimization (CHR)"));
+
++static cl::opt<bool> EnableCallGraphProfileSort(
++ "enable-call-graph-profile-sort", cl::init(true), cl::Hidden,
++ cl::desc("Enable call graph profile pass for the new PM (default = on)"));
++
+ extern cl::opt<bool> EnableHotColdSplit;
+ extern cl::opt<bool> EnableOrderFileInstrumentation;
+
+@@ -939,7 +943,8 @@ ModulePassManager PassBuilder::buildModuleOptimizationPipeline(
+ // Add the core optimizing pipeline.
+ MPM.addPass(createModuleToFunctionPassAdaptor(std::move(OptimizePM)));
+
+- MPM.addPass(CGProfilePass());
++ if (EnableCallGraphProfileSort)
++ MPM.addPass(CGProfilePass());
+
+ // Now we need to do some global optimization transforms.
+ // FIXME: It would seem like these should come first in the optimization
+"""
+
+if __name__ == "__main__":
+ unittest.main()
diff --git a/llvm_tools/revert_checker.py b/llvm_tools/revert_checker.py
index acc8b5fa..17914ba8 100755
--- a/llvm_tools/revert_checker.py
+++ b/llvm_tools/revert_checker.py
@@ -1,12 +1,12 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
-#===----------------------------------------------------------------------===##
+# ===----------------------------------------------------------------------===##
#
# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
#
-#===----------------------------------------------------------------------===##
+# ===----------------------------------------------------------------------===##
#
# !!!!!!!!!!!! NOTE !!!!!!!!!!!!
# This is copied directly from upstream LLVM. Please make any changes upstream,
@@ -51,9 +51,10 @@ import logging
import re
import subprocess
import sys
-from typing import Generator, List, NamedTuple, Iterable
+from typing import Generator, Iterable, List, NamedTuple
-assert sys.version_info >= (3, 6), 'Only Python 3.6+ is supported.'
+
+assert sys.version_info >= (3, 6), "Only Python 3.6+ is supported."
# People are creative with their reverts, and heuristics are a bit difficult.
# Like 90% of reverts have "This reverts commit ${full_sha}".
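# For example, both of the forms below are picked up by the parser
# (sketch, hypothetical SHAs):
#   "This reverts commit 1234567890abcdef1234567890abcdef12345678."
#   'Revert 123456 "Original commit title"'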
@@ -65,206 +66,256 @@ assert sys.version_info >= (3, 6), 'Only Python 3.6+ is supported.'
def _try_parse_reverts_from_commit_message(commit_message: str) -> List[str]:
- if not commit_message:
- return []
+ if not commit_message:
+ return []
- results = re.findall(r'This reverts commit ([a-f0-9]{40})\b', commit_message)
+ results = re.findall(
+ r"This reverts commit ([a-f0-9]{40})\b", commit_message
+ )
- first_line = commit_message.splitlines()[0]
- initial_revert = re.match(r'Revert ([a-f0-9]{6,}) "', first_line)
- if initial_revert:
- results.append(initial_revert.group(1))
- return results
+ first_line = commit_message.splitlines()[0]
+ initial_revert = re.match(r'Revert ([a-f0-9]{6,}) "', first_line)
+ if initial_revert:
+ results.append(initial_revert.group(1))
+ return results
def _stream_stdout(command: List[str]) -> Generator[str, None, None]:
- with subprocess.Popen(
- command, stdout=subprocess.PIPE, encoding='utf-8', errors='replace') as p:
- assert p.stdout is not None # for mypy's happiness.
- yield from p.stdout
+ with subprocess.Popen(
+ command, stdout=subprocess.PIPE, encoding="utf-8", errors="replace"
+ ) as p:
+ assert p.stdout is not None # for mypy's happiness.
+ yield from p.stdout
def _resolve_sha(git_dir: str, sha: str) -> str:
- if len(sha) == 40:
- return sha
-
- return subprocess.check_output(
- ['git', '-C', git_dir, 'rev-parse', sha],
- encoding='utf-8',
- stderr=subprocess.DEVNULL,
- ).strip()
-
-
-_LogEntry = NamedTuple('_LogEntry', [
- ('sha', str),
- ('commit_message', str),
-])
-
-
-def _log_stream(git_dir: str, root_sha: str,
- end_at_sha: str) -> Iterable[_LogEntry]:
- sep = 50 * '<>'
- log_command = [
- 'git',
- '-C',
- git_dir,
- 'log',
- '^' + end_at_sha,
- root_sha,
- '--format=' + sep + '%n%H%n%B%n',
- ]
-
- stdout_stream = iter(_stream_stdout(log_command))
-
- # Find the next separator line. If there's nothing to log, it may not exist.
- # It might not be the first line if git feels complainy.
- found_commit_header = False
- for line in stdout_stream:
- if line.rstrip() == sep:
- found_commit_header = True
- break
-
- while found_commit_header:
- sha = next(stdout_stream, None)
- assert sha is not None, 'git died?'
- sha = sha.rstrip()
-
- commit_message = []
-
+ if len(sha) == 40:
+ return sha
+
+ return subprocess.check_output(
+ ["git", "-C", git_dir, "rev-parse", sha],
+ encoding="utf-8",
+ stderr=subprocess.DEVNULL,
+ ).strip()
+
+
+_LogEntry = NamedTuple(
+ "_LogEntry",
+ [
+ ("sha", str),
+ ("commit_message", str),
+ ],
+)
+
+
+def _log_stream(
+ git_dir: str, root_sha: str, end_at_sha: str
+) -> Iterable[_LogEntry]:
+ sep = 50 * "<>"
+ log_command = [
+ "git",
+ "-C",
+ git_dir,
+ "log",
+ "^" + end_at_sha,
+ root_sha,
+ "--format=" + sep + "%n%H%n%B%n",
+ ]
+
+ stdout_stream = iter(_stream_stdout(log_command))
+
+ # Find the next separator line. If there's nothing to log, it may not exist.
+ # It might not be the first line if git feels complainy.
found_commit_header = False
for line in stdout_stream:
- line = line.rstrip()
- if line.rstrip() == sep:
- found_commit_header = True
- break
- commit_message.append(line)
+ if line.rstrip() == sep:
+ found_commit_header = True
+ break
+
+ while found_commit_header:
+ sha = next(stdout_stream, None)
+ assert sha is not None, "git died?"
+ sha = sha.rstrip()
+
+ commit_message = []
+
+ found_commit_header = False
+ for line in stdout_stream:
+ line = line.rstrip()
+ if line.rstrip() == sep:
+ found_commit_header = True
+ break
+ commit_message.append(line)
- yield _LogEntry(sha, '\n'.join(commit_message).rstrip())
+ yield _LogEntry(sha, "\n".join(commit_message).rstrip())
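+# A sketch of the stream this parses, given the --format above:
+#   <><><>...<>            (separator line)
+#   <40-char commit sha>
+#   <commit message body, possibly many lines>
+#   <><><>...<>            (next separator)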
def _shas_between(git_dir: str, base_ref: str, head_ref: str) -> Iterable[str]:
- rev_list = [
- 'git',
- '-C',
- git_dir,
- 'rev-list',
- '--first-parent',
- f'{base_ref}..{head_ref}',
- ]
- return (x.strip() for x in _stream_stdout(rev_list))
+ rev_list = [
+ "git",
+ "-C",
+ git_dir,
+ "rev-list",
+ "--first-parent",
+ f"{base_ref}..{head_ref}",
+ ]
+ return (x.strip() for x in _stream_stdout(rev_list))
def _rev_parse(git_dir: str, ref: str) -> str:
- return subprocess.check_output(
- ['git', '-C', git_dir, 'rev-parse', ref],
- encoding='utf-8',
- ).strip()
+ return subprocess.check_output(
+ ["git", "-C", git_dir, "rev-parse", ref],
+ encoding="utf-8",
+ ).strip()
-Revert = NamedTuple('Revert', [
- ('sha', str),
- ('reverted_sha', str),
-])
+Revert = NamedTuple(
+ "Revert",
+ [
+ ("sha", str),
+ ("reverted_sha", str),
+ ],
+)
def _find_common_parent_commit(git_dir: str, ref_a: str, ref_b: str) -> str:
- """Finds the closest common parent commit between `ref_a` and `ref_b`."""
- return subprocess.check_output(
- ['git', '-C', git_dir, 'merge-base', ref_a, ref_b],
- encoding='utf-8',
- ).strip()
+ """Finds the closest common parent commit between `ref_a` and `ref_b`."""
+ return subprocess.check_output(
+ ["git", "-C", git_dir, "merge-base", ref_a, ref_b],
+ encoding="utf-8",
+ ).strip()
def find_reverts(git_dir: str, across_ref: str, root: str) -> List[Revert]:
- """Finds reverts across `across_ref` in `git_dir`, starting from `root`.
-
- These reverts are returned in order of oldest reverts first.
- """
- across_sha = _rev_parse(git_dir, across_ref)
- root_sha = _rev_parse(git_dir, root)
-
- common_ancestor = _find_common_parent_commit(git_dir, across_sha, root_sha)
- if common_ancestor != across_sha:
- raise ValueError(f"{across_sha} isn't an ancestor of {root_sha} "
- '(common ancestor: {common_ancestor})')
-
- intermediate_commits = set(_shas_between(git_dir, across_sha, root_sha))
- assert across_sha not in intermediate_commits
-
- logging.debug('%d commits appear between %s and %s',
- len(intermediate_commits), across_sha, root_sha)
-
- all_reverts = []
- for sha, commit_message in _log_stream(git_dir, root_sha, across_sha):
- reverts = _try_parse_reverts_from_commit_message(commit_message)
- if not reverts:
- continue
-
- resolved_reverts = sorted(set(_resolve_sha(git_dir, x) for x in reverts))
- for reverted_sha in resolved_reverts:
- if reverted_sha in intermediate_commits:
- logging.debug('Commit %s reverts %s, which happened after %s', sha,
- reverted_sha, across_sha)
- continue
-
- try:
- object_type = subprocess.check_output(
- ['git', '-C', git_dir, 'cat-file', '-t', reverted_sha],
- encoding='utf-8',
- stderr=subprocess.DEVNULL,
- ).strip()
- except subprocess.CalledProcessError:
- logging.warning(
- 'Failed to resolve reverted object %s (claimed to be reverted '
- 'by sha %s)', reverted_sha, sha)
- continue
-
- if object_type == 'commit':
- all_reverts.append(Revert(sha, reverted_sha))
- continue
-
- logging.error("%s claims to revert %s -- which isn't a commit -- %s", sha,
- object_type, reverted_sha)
-
- # Since `all_reverts` contains reverts in log order (e.g., newer comes before
- # older), we need to reverse this to keep with our guarantee of older =
- # earlier in the result.
- all_reverts.reverse()
- return all_reverts
+ """Finds reverts across `across_ref` in `git_dir`, starting from `root`.
+
+ These reverts are returned in order of oldest reverts first.
+ """
+ across_sha = _rev_parse(git_dir, across_ref)
+ root_sha = _rev_parse(git_dir, root)
+
+ common_ancestor = _find_common_parent_commit(git_dir, across_sha, root_sha)
+ if common_ancestor != across_sha:
+ raise ValueError(
+ f"{across_sha} isn't an ancestor of {root_sha} "
+ "(common ancestor: {common_ancestor})"
+ )
+
+ intermediate_commits = set(_shas_between(git_dir, across_sha, root_sha))
+ assert across_sha not in intermediate_commits
+
+ logging.debug(
+ "%d commits appear between %s and %s",
+ len(intermediate_commits),
+ across_sha,
+ root_sha,
+ )
+
+ all_reverts = []
+ for sha, commit_message in _log_stream(git_dir, root_sha, across_sha):
+ reverts = _try_parse_reverts_from_commit_message(commit_message)
+ if not reverts:
+ continue
+
+ resolved_reverts = sorted(
+ set(_resolve_sha(git_dir, x) for x in reverts)
+ )
+ for reverted_sha in resolved_reverts:
+ if reverted_sha in intermediate_commits:
+ logging.debug(
+ "Commit %s reverts %s, which happened after %s",
+ sha,
+ reverted_sha,
+ across_sha,
+ )
+ continue
+
+ try:
+ object_type = subprocess.check_output(
+ ["git", "-C", git_dir, "cat-file", "-t", reverted_sha],
+ encoding="utf-8",
+ stderr=subprocess.DEVNULL,
+ ).strip()
+ except subprocess.CalledProcessError:
+ logging.warning(
+ "Failed to resolve reverted object %s (claimed to be reverted "
+ "by sha %s)",
+ reverted_sha,
+ sha,
+ )
+ continue
+
+ if object_type == "commit":
+ all_reverts.append(Revert(sha, reverted_sha))
+ continue
+
+ logging.error(
+ "%s claims to revert %s -- which isn't a commit -- %s",
+ sha,
+ object_type,
+ reverted_sha,
+ )
+
+ # Since `all_reverts` contains reverts in log order (i.e., newer comes before
+ # older), we need to reverse this to keep with our guarantee of older =
+ # earlier in the result.
+ all_reverts.reverse()
+ return all_reverts
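+# Usage sketch (hypothetical paths/refs): find commits on a local branch
+# that revert commits at or before the upstream baseline:
+#   reverts = find_reverts("/path/to/llvm", "upstream/main", "my-branch")
+#   for r in reverts:
+#       print(f"{r.sha} claims to revert {r.reverted_sha}")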
def _main() -> None:
- parser = argparse.ArgumentParser(
- description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter)
- parser.add_argument(
- 'base_ref', help='Git ref or sha to check for reverts around.')
- parser.add_argument(
- '-C', '--git_dir', default='.', help='Git directory to use.')
- parser.add_argument(
- 'root', nargs='+', help='Root(s) to search for commits from.')
- parser.add_argument('--debug', action='store_true')
- opts = parser.parse_args()
-
- logging.basicConfig(
- format='%(asctime)s: %(levelname)s: %(filename)s:%(lineno)d: %(message)s',
- level=logging.DEBUG if opts.debug else logging.INFO,
- )
-
- # `root`s can have related history, so we want to filter duplicate commits
- # out. The overwhelmingly common case is also to have one root, and it's way
- # easier to reason about output that comes in an order that's meaningful to
- # git.
- seen_reverts = set()
- all_reverts = []
- for root in opts.root:
- for revert in find_reverts(opts.git_dir, opts.base_ref, root):
- if revert not in seen_reverts:
- seen_reverts.add(revert)
- all_reverts.append(revert)
-
- for revert in all_reverts:
- print(f'{revert.sha} claims to revert {revert.reverted_sha}')
-
-
-if __name__ == '__main__':
- _main()
+ parser = argparse.ArgumentParser(
+ description=__doc__,
+ formatter_class=argparse.RawDescriptionHelpFormatter,
+ )
+ parser.add_argument(
+ "base_ref", help="Git ref or sha to check for reverts around."
+ )
+ parser.add_argument(
+ "-C", "--git_dir", default=".", help="Git directory to use."
+ )
+ parser.add_argument(
+ "root", nargs="+", help="Root(s) to search for commits from."
+ )
+ parser.add_argument("--debug", action="store_true")
+ parser.add_argument(
+ "-u",
+ "--review_url",
+ action="store_true",
+ help="Format SHAs as llvm review URLs",
+ )
+ opts = parser.parse_args()
+
+ logging.basicConfig(
+ format="%(asctime)s: %(levelname)s: %(filename)s:%(lineno)d: %(message)s",
+ level=logging.DEBUG if opts.debug else logging.INFO,
+ )
+
+ # `root`s can have related history, so we want to filter duplicate commits
+ # out. The overwhelmingly common case is also to have one root, and it's way
+ # easier to reason about output that comes in an order that's meaningful to
+ # git.
+ seen_reverts = set()
+ all_reverts = []
+ for root in opts.root:
+ for revert in find_reverts(opts.git_dir, opts.base_ref, root):
+ if revert not in seen_reverts:
+ seen_reverts.add(revert)
+ all_reverts.append(revert)
+
+ for revert in all_reverts:
+ sha_fmt = (
+ f"https://reviews.llvm.org/rG{revert.sha}"
+ if opts.review_url
+ else revert.sha
+ )
+ reverted_sha_fmt = (
+ f"https://reviews.llvm.org/rG{revert.reverted_sha}"
+ if opts.review_url
+ else revert.reverted_sha
+ )
+ print(f"{sha_fmt} claims to revert {reverted_sha_fmt}")
+
+
+if __name__ == "__main__":
+ _main()
diff --git a/llvm_tools/subprocess_helpers.py b/llvm_tools/subprocess_helpers.py
index 8845112c..bc87db85 100644
--- a/llvm_tools/subprocess_helpers.py
+++ b/llvm_tools/subprocess_helpers.py
@@ -1,58 +1,59 @@
# -*- coding: utf-8 -*-
-# Copyright 2019 The Chromium OS Authors. All rights reserved.
+# Copyright 2019 The ChromiumOS Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Helpers/wrappers for the subprocess module for migration to python3."""
-from __future__ import print_function
import subprocess
def CheckCommand(cmd):
- """Executes the command using Popen()."""
+ """Executes the command using Popen()."""
- cmd_obj = subprocess.Popen(
- cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, encoding='UTF-8')
+ cmd_obj = subprocess.Popen(
+ cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, encoding="UTF-8"
+ )
- stdout, _ = cmd_obj.communicate()
+ stdout, _ = cmd_obj.communicate()
- if cmd_obj.returncode:
- print(stdout)
- raise subprocess.CalledProcessError(cmd_obj.returncode, cmd)
+ if cmd_obj.returncode:
+ print(stdout)
+ raise subprocess.CalledProcessError(cmd_obj.returncode, cmd)
def check_output(cmd, cwd=None):
- """Wrapper for pre-python3 subprocess.check_output()."""
+ """Wrapper for pre-python3 subprocess.check_output()."""
- return subprocess.check_output(cmd, encoding='UTF-8', cwd=cwd)
+ return subprocess.check_output(cmd, encoding="UTF-8", cwd=cwd)
def check_call(cmd, cwd=None):
- """Wrapper for pre-python3 subprocess.check_call()."""
+ """Wrapper for pre-python3 subprocess.check_call()."""
- subprocess.check_call(cmd, encoding='UTF-8', cwd=cwd)
+ subprocess.check_call(cmd, encoding="UTF-8", cwd=cwd)
# FIXME: CTRL+C does not work when executing a command inside the chroot via
# `cros_sdk`.
def ChrootRunCommand(chroot_path, cmd, verbose=False):
- """Runs the command inside the chroot."""
+ """Runs the command inside the chroot."""
- exec_chroot_cmd = ['cros_sdk', '--']
- exec_chroot_cmd.extend(cmd)
+ exec_chroot_cmd = ["cros_sdk", "--"]
+ exec_chroot_cmd.extend(cmd)
- return ExecCommandAndCaptureOutput(
- exec_chroot_cmd, cwd=chroot_path, verbose=verbose)
+ return ExecCommandAndCaptureOutput(
+ exec_chroot_cmd, cwd=chroot_path, verbose=verbose
+ )
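+# Usage sketch (hypothetical path): run a command inside the chroot rooted
+# at ~/chromiumos:
+#   out = ChrootRunCommand("/home/user/chromiumos", ["emerge", "--version"])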
def ExecCommandAndCaptureOutput(cmd, cwd=None, verbose=False):
- """Executes the command and prints to stdout if possible."""
+ """Executes the command and prints to stdout if possible."""
- out = check_output(cmd, cwd=cwd).rstrip()
+ out = check_output(cmd, cwd=cwd).rstrip()
- if verbose and out:
- print(out)
+ if verbose and out:
+ print(out)
- return out
+ return out
diff --git a/llvm_tools/test_helpers.py b/llvm_tools/test_helpers.py
index 99448181..67d88d9f 100644
--- a/llvm_tools/test_helpers.py
+++ b/llvm_tools/test_helpers.py
@@ -1,89 +1,88 @@
# -*- coding: utf-8 -*-
-# Copyright 2019 The Chromium OS Authors. All rights reserved.
+# Copyright 2019 The ChromiumOS Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Helper functions for unit testing."""
-from __future__ import print_function
from contextlib import contextmanager
-from tempfile import mkstemp
import json
import os
+from tempfile import mkstemp
class ArgsOutputTest(object):
- """Testing class to simulate a argument parser object."""
+ """Testing class to simulate a argument parser object."""
- def __init__(self, svn_option='google3'):
- self.chroot_path = '/abs/path/to/chroot'
- self.last_tested = '/abs/path/to/last_tested_file.json'
- self.llvm_version = svn_option
- self.verbose = False
- self.extra_change_lists = None
- self.options = ['latest-toolchain']
- self.builders = ['some-builder']
+ def __init__(self, svn_option="google3"):
+ self.chroot_path = "/abs/path/to/chroot"
+ self.last_tested = "/abs/path/to/last_tested_file.json"
+ self.llvm_version = svn_option
+ self.verbose = False
+ self.extra_change_lists = None
+ self.options = ["latest-toolchain"]
+ self.builders = ["some-builder"]
# FIXME: Migrate modules with similar helper to use this module.
def CallCountsToMockFunctions(mock_function):
- """A decorator that passes a call count to the function it decorates.
+ """A decorator that passes a call count to the function it decorates.
- Examples:
- @CallCountsToMockFunctions
- def foo(call_count):
- return call_count
- ...
- ...
- [foo(), foo(), foo()]
- [0, 1, 2]
- """
+ Examples:
+ @CallCountsToMockFunctions
+ def foo(call_count):
+ return call_count
+ ...
+ ...
+ [foo(), foo(), foo()]
+ [0, 1, 2]
+ """
- counter = [0]
+ counter = [0]
- def Result(*args, **kwargs):
- # For some values of `counter`, the mock function would simulate raising
- # an exception, so let the test case catch the exception via
- # `unittest.TestCase.assertRaises()` and to also handle recursive functions.
- prev_counter = counter[0]
- counter[0] += 1
+ def Result(*args, **kwargs):
+ # For some values of `counter`, the mock function may simulate raising
+ # an exception, so let the test case catch it via
+ # `unittest.TestCase.assertRaises()`; this also works for recursive functions.
+ prev_counter = counter[0]
+ counter[0] += 1
- ret_value = mock_function(prev_counter, *args, **kwargs)
+ ret_value = mock_function(prev_counter, *args, **kwargs)
- return ret_value
+ return ret_value
- return Result
+ return Result
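+# Typical use (sketch): give a mocked function different behavior on each
+# call, e.g.
+#   @CallCountsToMockFunctions
+#   def _fake_check_output(call_count, cmd, *args, **kwargs):
+#       if call_count == 0:
+#           raise subprocess.CalledProcessError(1, cmd)
+#       return "ok"
+# and then patch with mock.patch(..., side_effect=_fake_check_output).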
def WritePrettyJsonFile(file_name, json_object):
- """Writes the contents of the file to the json object.
+ """Writes the contents of the file to the json object.
- Args:
- file_name: The file that has contents to be used for the json object.
- json_object: The json object to write to.
- """
+ Args:
+ file_name: The object to serialize as JSON.
+ json_object: The file object to write the JSON to.
+ """
- json.dump(file_name, json_object, indent=4, separators=(',', ': '))
+ json.dump(file_name, json_object, indent=4, separators=(",", ": "))
def CreateTemporaryJsonFile():
- """Makes a temporary .json file."""
+ """Makes a temporary .json file."""
- return CreateTemporaryFile(suffix='.json')
+ return CreateTemporaryFile(suffix=".json")
@contextmanager
-def CreateTemporaryFile(suffix=''):
- """Makes a temporary file."""
+def CreateTemporaryFile(suffix=""):
+ """Makes a temporary file."""
- fd, temp_file_path = mkstemp(suffix=suffix)
+ fd, temp_file_path = mkstemp(suffix=suffix)
- os.close(fd)
+ os.close(fd)
- try:
- yield temp_file_path
+ try:
+ yield temp_file_path
- finally:
- if os.path.isfile(temp_file_path):
- os.remove(temp_file_path)
+ finally:
+ if os.path.isfile(temp_file_path):
+ os.remove(temp_file_path)
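+# Usage sketch:
+#   with CreateTemporaryJsonFile() as temp_json:
+#       with open(temp_json, "w") as f:
+#           json.dump({"key": "value"}, f)
+#   # temp_json is removed on exit from the `with` block.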
diff --git a/llvm_tools/update_chromeos_llvm_hash.py b/llvm_tools/update_chromeos_llvm_hash.py
index 4e9b9104..75c6ce6c 100755
--- a/llvm_tools/update_chromeos_llvm_hash.py
+++ b/llvm_tools/update_chromeos_llvm_hash.py
@@ -1,6 +1,6 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
-# Copyright 2019 The Chromium OS Authors. All rights reserved.
+# Copyright 2019 The ChromiumOS Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
@@ -10,37 +10,40 @@ For each package, a temporary repo is created and the changes are uploaded
for review.
"""
-from __future__ import print_function
-
import argparse
import datetime
import enum
import os
+from pathlib import Path
import re
import subprocess
+from typing import Dict, Iterable
import chroot
import failure_modes
import get_llvm_hash
import git
-import llvm_patch_management
+import patch_utils
+import subprocess_helpers
+
DEFAULT_PACKAGES = [
- 'dev-util/lldb-server',
- 'sys-devel/llvm',
- 'sys-libs/compiler-rt',
- 'sys-libs/libcxx',
- 'sys-libs/libcxxabi',
- 'sys-libs/llvm-libunwind',
+ "dev-util/lldb-server",
+ "sys-devel/llvm",
+ "sys-libs/compiler-rt",
+ "sys-libs/libcxx",
+ "sys-libs/llvm-libunwind",
]
+DEFAULT_MANIFEST_PACKAGES = ["sys-devel/llvm"]
+
# Specify which LLVM hash to update
class LLVMVariant(enum.Enum):
- """Represent the LLVM hash in an ebuild file to update."""
+ """Represent the LLVM hash in an ebuild file to update."""
- current = 'LLVM_HASH'
- next = 'LLVM_NEXT_HASH'
+ current = "LLVM_HASH"
+ next = "LLVM_NEXT_HASH"
# If set to `True`, then the contents of `stdout` after executing a command will
@@ -48,582 +51,753 @@ class LLVMVariant(enum.Enum):
verbose = False
-def defaultCrosRoot():
- """Get default location of chroot_path.
+def defaultCrosRoot() -> Path:
+ """Get default location of chroot_path.
- The logic assumes that the cros_root is ~/chromiumos, unless llvm_tools is
- inside of a CrOS checkout, in which case that checkout should be used.
+ The logic assumes that the cros_root is ~/chromiumos, unless llvm_tools is
+ inside of a CrOS checkout, in which case that checkout should be used.
- Returns:
- The best guess location for the cros checkout.
- """
- llvm_tools_path = os.path.realpath(os.path.dirname(__file__))
- if llvm_tools_path.endswith('src/third_party/toolchain-utils/llvm_tools'):
- return os.path.join(llvm_tools_path, '../../../../')
- return '~/chromiumos'
+ Returns:
+ The best guess location for the cros checkout.
+ """
+ llvm_tools_path = os.path.realpath(os.path.dirname(__file__))
+ if llvm_tools_path.endswith("src/third_party/toolchain-utils/llvm_tools"):
+ return Path(llvm_tools_path).parent.parent.parent.parent
+ return Path.home() / "chromiumos"
def GetCommandLineArgs():
- """Parses the command line for the optional command line arguments.
-
- Returns:
- The log level to use when retrieving the LLVM hash or google3 LLVM version,
- the chroot path to use for executing chroot commands,
- a list of a package or packages to update their LLVM next hash,
- and the LLVM version to use when retrieving the LLVM hash.
- """
-
- # Create parser and add optional command-line arguments.
- parser = argparse.ArgumentParser(
- description="Updates the build's hash for llvm-next.")
-
- # Add argument for a specific chroot path.
- parser.add_argument('--chroot_path',
- default=defaultCrosRoot(),
- help='the path to the chroot (default: %(default)s)')
-
- # Add argument for specific builds to uprev and update their llvm-next hash.
- parser.add_argument('--update_packages',
- default=DEFAULT_PACKAGES,
- required=False,
- nargs='+',
- help='the ebuilds to update their hash for llvm-next '
- '(default: %(default)s)')
-
- # Add argument for whether to display command contents to `stdout`.
- parser.add_argument('--verbose',
- action='store_true',
- help='display contents of a command to the terminal '
- '(default: %(default)s)')
-
- # Add argument for the LLVM hash to update
- parser.add_argument(
- '--is_llvm_next',
- action='store_true',
- help='which llvm hash to update. If specified, update LLVM_NEXT_HASH. '
- 'Otherwise, update LLVM_HASH')
-
- # Add argument for the LLVM version to use.
- parser.add_argument(
- '--llvm_version',
- type=get_llvm_hash.IsSvnOption,
- required=True,
- help='which git hash to use. Either a svn revision, or one '
- 'of %s' % sorted(get_llvm_hash.KNOWN_HASH_SOURCES))
-
- # Add argument for the mode of the patch management when handling patches.
- parser.add_argument(
- '--failure_mode',
- default=failure_modes.FailureModes.FAIL.value,
- choices=[
- failure_modes.FailureModes.FAIL.value,
- failure_modes.FailureModes.CONTINUE.value,
- failure_modes.FailureModes.DISABLE_PATCHES.value,
- failure_modes.FailureModes.REMOVE_PATCHES.value
- ],
- help='the mode of the patch manager when handling failed patches '
- '(default: %(default)s)')
-
- # Add argument for the patch metadata file.
- parser.add_argument(
- '--patch_metadata_file',
- default='PATCHES.json',
- help='the .json file that has all the patches and their '
- 'metadata if applicable (default: PATCHES.json inside $FILESDIR)')
-
- # Parse the command line.
- args_output = parser.parse_args()
-
- # FIXME: We shouldn't be using globals here, but until we fix it, make pylint
- # stop complaining about it.
- # pylint: disable=global-statement
- global verbose
-
- verbose = args_output.verbose
-
- return args_output
+ """Parses the command line for the optional command line arguments.
+
+ Returns:
+ The parsed command-line arguments: the chroot path to use for executing
+ chroot commands, the packages whose LLVM hash should be updated, and the
+ LLVM version to use when retrieving the LLVM hash.
+ """
+
+ # Create parser and add optional command-line arguments.
+ parser = argparse.ArgumentParser(
+ description="Updates the build's hash for llvm-next."
+ )
+
+ # Add argument for a specific chroot path.
+ parser.add_argument(
+ "--chroot_path",
+ type=Path,
+ default=defaultCrosRoot(),
+ help="the path to the chroot (default: %(default)s)",
+ )
+
+ # Add argument for specific builds to uprev and update their llvm-next hash.
+ parser.add_argument(
+ "--update_packages",
+ default=",".join(DEFAULT_PACKAGES),
+ help="Comma-separated ebuilds to update llvm-next hash for "
+ "(default: %(default)s)",
+ )
+
+ parser.add_argument(
+ "--manifest_packages",
+ default="",
+ help="Comma-separated ebuilds to update manifests for "
+ "(default: %(default)s)",
+ )
+
+ # Add argument for whether to display command contents to `stdout`.
+ parser.add_argument(
+ "--verbose",
+ action="store_true",
+ help="display contents of a command to the terminal "
+ "(default: %(default)s)",
+ )
+
+ # Add argument for the LLVM hash to update
+ parser.add_argument(
+ "--is_llvm_next",
+ action="store_true",
+ help="which llvm hash to update. If specified, update LLVM_NEXT_HASH. "
+ "Otherwise, update LLVM_HASH",
+ )
+
+ # Add argument for the LLVM version to use.
+ parser.add_argument(
+ "--llvm_version",
+ type=get_llvm_hash.IsSvnOption,
+ required=True,
+ help="which git hash to use. Either a svn revision, or one "
+ f"of {sorted(get_llvm_hash.KNOWN_HASH_SOURCES)}",
+ )
+
+ # Add argument for the mode of the patch management when handling patches.
+ parser.add_argument(
+ "--failure_mode",
+ default=failure_modes.FailureModes.FAIL.value,
+ choices=[
+ failure_modes.FailureModes.FAIL.value,
+ failure_modes.FailureModes.CONTINUE.value,
+ failure_modes.FailureModes.DISABLE_PATCHES.value,
+ failure_modes.FailureModes.REMOVE_PATCHES.value,
+ ],
+ help="the mode of the patch manager when handling failed patches "
+ "(default: %(default)s)",
+ )
+
+ # Add argument for the patch metadata file.
+ parser.add_argument(
+ "--patch_metadata_file",
+ default="PATCHES.json",
+ help="the .json file that has all the patches and their "
+ "metadata if applicable (default: PATCHES.json inside $FILESDIR)",
+ )
+
+ # Parse the command line.
+ args_output = parser.parse_args()
+
+ # FIXME: We shouldn't be using globals here, but until we fix it, make pylint
+ # stop complaining about it.
+ # pylint: disable=global-statement
+ global verbose
+
+ verbose = args_output.verbose
+
+ return args_output
def GetEbuildPathsFromSymLinkPaths(symlinks):
- """Reads the symlink(s) to get the ebuild path(s) to the package(s).
+ """Reads the symlink(s) to get the ebuild path(s) to the package(s).
- Args:
- symlinks: A list of absolute path symlink/symlinks that point
- to the package's ebuild.
+ Args:
+ symlinks: A list of absolute path symlink/symlinks that point
+ to the package's ebuild.
- Returns:
- A dictionary where the key is the absolute path of the symlink and the value
- is the absolute path to the ebuild that was read from the symlink.
+ Returns:
+ A dictionary where the key is the absolute path of the symlink and the value
+ is the absolute path to the ebuild that was read from the symlink.
- Raises:
- ValueError: Invalid symlink(s) were provided.
- """
+ Raises:
+ ValueError: Invalid symlink(s) were provided.
+ """
- # A dictionary that holds:
- # key: absolute symlink path
- # value: absolute ebuild path
- resolved_paths = {}
+ # A dictionary that holds:
+ # key: absolute symlink path
+ # value: absolute ebuild path
+ resolved_paths = {}
- # Iterate through each symlink.
- #
- # For each symlink, check that it is a valid symlink,
- # and then construct the ebuild path, and
- # then add the ebuild path to the dict.
- for cur_symlink in symlinks:
- if not os.path.islink(cur_symlink):
- raise ValueError('Invalid symlink provided: %s' % cur_symlink)
+ # Iterate through each symlink.
+ #
+ # For each symlink, check that it is a valid symlink,
+ # and then construct the ebuild path, and
+ # then add the ebuild path to the dict.
+ for cur_symlink in symlinks:
+ if not os.path.islink(cur_symlink):
+ raise ValueError(f"Invalid symlink provided: {cur_symlink}")
- # Construct the absolute path to the ebuild.
- ebuild_path = os.path.realpath(cur_symlink)
+ # Construct the absolute path to the ebuild.
+ ebuild_path = os.path.realpath(cur_symlink)
- if cur_symlink not in resolved_paths:
- resolved_paths[cur_symlink] = ebuild_path
+ if cur_symlink not in resolved_paths:
+ resolved_paths[cur_symlink] = ebuild_path
- return resolved_paths
+ return resolved_paths
def UpdateEbuildLLVMHash(ebuild_path, llvm_variant, git_hash, svn_version):
- """Updates the LLVM hash in the ebuild.
+ """Updates the LLVM hash in the ebuild.
- The build changes are staged for commit in the temporary repo.
+ The build changes are staged for commit in the temporary repo.
- Args:
- ebuild_path: The absolute path to the ebuild.
- llvm_variant: Which LLVM hash to update.
- git_hash: The new git hash.
- svn_version: The SVN-style revision number of git_hash.
+ Args:
+ ebuild_path: The absolute path to the ebuild.
+ llvm_variant: Which LLVM hash to update.
+ git_hash: The new git hash.
+ svn_version: The SVN-style revision number of git_hash.
- Raises:
- ValueError: Invalid ebuild path provided or failed to stage the commit
- of the changes or failed to update the LLVM hash.
- """
+ Raises:
+ ValueError: Invalid ebuild path provided or failed to stage the commit
+ of the changes or failed to update the LLVM hash.
+ """
- # Iterate through each ebuild.
- #
- # For each ebuild, read the file in
- # advance and then create a temporary file
- # that gets updated with the new LLVM hash
- # and revision number and then the ebuild file
- # gets updated to the temporary file.
+ # Iterate through each ebuild.
+ #
+ # For each ebuild, read the file in
+ # advance and then create a temporary file
+ # that gets updated with the new LLVM hash
+ # and revision number and then the ebuild file
+ # gets updated to the temporary file.
- if not os.path.isfile(ebuild_path):
- raise ValueError('Invalid ebuild path provided: %s' % ebuild_path)
+ if not os.path.isfile(ebuild_path):
+ raise ValueError(f"Invalid ebuild path provided: {ebuild_path}")
- temp_ebuild_file = '%s.temp' % ebuild_path
+ temp_ebuild_file = f"{ebuild_path}.temp"
- with open(ebuild_path) as ebuild_file:
- # write updates to a temporary file in case of interrupts
- with open(temp_ebuild_file, 'w') as temp_file:
- for cur_line in ReplaceLLVMHash(ebuild_file, llvm_variant, git_hash,
- svn_version):
- temp_file.write(cur_line)
- os.rename(temp_ebuild_file, ebuild_path)
+ with open(ebuild_path) as ebuild_file:
+ # write updates to a temporary file in case of interrupts
+ with open(temp_ebuild_file, "w") as temp_file:
+ for cur_line in ReplaceLLVMHash(
+ ebuild_file, llvm_variant, git_hash, svn_version
+ ):
+ temp_file.write(cur_line)
+ os.rename(temp_ebuild_file, ebuild_path)
- # Get the path to the parent directory.
- parent_dir = os.path.dirname(ebuild_path)
+ # Get the path to the parent directory.
+ parent_dir = os.path.dirname(ebuild_path)
- # Stage the changes.
- subprocess.check_output(['git', '-C', parent_dir, 'add', ebuild_path])
+ # Stage the changes.
+ subprocess.check_output(["git", "-C", parent_dir, "add", ebuild_path])
def ReplaceLLVMHash(ebuild_lines, llvm_variant, git_hash, svn_version):
- """Updates the LLVM git hash.
+ """Updates the LLVM git hash.
- Args:
- ebuild_lines: The contents of the ebuild file.
- llvm_variant: The LLVM hash to update.
- git_hash: The new git hash.
- svn_version: The SVN-style revision number of git_hash.
+ Args:
+ ebuild_lines: The contents of the ebuild file.
+ llvm_variant: The LLVM hash to update.
+ git_hash: The new git hash.
+ svn_version: The SVN-style revision number of git_hash.
- Yields:
- lines of the modified ebuild file
- """
- is_updated = False
- llvm_regex = re.compile('^' + re.escape(llvm_variant.value) +
- '=\"[a-z0-9]+\"')
- for cur_line in ebuild_lines:
- if not is_updated and llvm_regex.search(cur_line):
- # Update the git hash and revision number.
- cur_line = '%s=\"%s\" # r%d\n' % (llvm_variant.value, git_hash,
- svn_version)
+ Yields:
+ lines of the modified ebuild file
+ """
+ is_updated = False
+ llvm_regex = re.compile(
+ "^" + re.escape(llvm_variant.value) + '="[a-z0-9]+"'
+ )
+ for cur_line in ebuild_lines:
+ if not is_updated and llvm_regex.search(cur_line):
+ # Update the git hash and revision number.
+ cur_line = f'{llvm_variant.value}="{git_hash}" # r{svn_version}\n'
- is_updated = True
+ is_updated = True
- yield cur_line
+ yield cur_line
- if not is_updated:
- raise ValueError('Failed to update %s' % llvm_variant.value)
+ if not is_updated:
+ raise ValueError(f"Failed to update {llvm_variant.value}")
def UprevEbuildSymlink(symlink):
- """Uprevs the symlink's revision number.
+ """Uprevs the symlink's revision number.
- Increases the revision number by 1 and stages the change in
- the temporary repo.
+ Increases the revision number by 1 and stages the change in
+ the temporary repo.
- Args:
- symlink: The absolute path of an ebuild symlink.
+ Args:
+ symlink: The absolute path of an ebuild symlink.
- Raises:
- ValueError: Failed to uprev the symlink or failed to stage the changes.
- """
+ Raises:
+ ValueError: Failed to uprev the symlink or failed to stage the changes.
+ """
- if not os.path.islink(symlink):
- raise ValueError('Invalid symlink provided: %s' % symlink)
+ if not os.path.islink(symlink):
+ raise ValueError(f"Invalid symlink provided: {symlink}")
- new_symlink, is_changed = re.subn(
- r'r([0-9]+).ebuild',
- lambda match: 'r%s.ebuild' % str(int(match.group(1)) + 1),
- symlink,
- count=1)
+ new_symlink, is_changed = re.subn(
+ r"r([0-9]+).ebuild",
+ lambda match: "r%s.ebuild" % str(int(match.group(1)) + 1),
+ symlink,
+ count=1,
+ )
- if not is_changed:
- raise ValueError('Failed to uprev the symlink.')
+ if not is_changed:
+ raise ValueError("Failed to uprev the symlink.")
- # rename the symlink
- subprocess.check_output(
- ['git', '-C',
- os.path.dirname(symlink), 'mv', symlink, new_symlink])
+ # rename the symlink
+ subprocess.check_output(
+ ["git", "-C", os.path.dirname(symlink), "mv", symlink, new_symlink]
+ )
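+# Example (sketch, hypothetical name): a symlink llvm-15.0_pre445000-r2.ebuild
+# is renamed to llvm-15.0_pre445000-r3.ebuild.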
def UprevEbuildToVersion(symlink, svn_version, git_hash):
- """Uprevs the ebuild's revision number.
-
- Increases the revision number by 1 and stages the change in
- the temporary repo.
-
- Args:
- symlink: The absolute path of an ebuild symlink.
- svn_version: The SVN-style revision number of git_hash.
- git_hash: The new git hash.
-
- Raises:
- ValueError: Failed to uprev the ebuild or failed to stage the changes.
- AssertionError: No llvm version provided for an LLVM uprev
- """
-
- if not os.path.islink(symlink):
- raise ValueError('Invalid symlink provided: %s' % symlink)
-
- ebuild = os.path.realpath(symlink)
- llvm_major_version = get_llvm_hash.GetLLVMMajorVersion(git_hash)
- # llvm
- package = os.path.basename(os.path.dirname(symlink))
- if not package:
- raise ValueError('Tried to uprev an unknown package')
- if package == 'llvm':
- new_ebuild, is_changed = re.subn(
- r'(\d+)\.(\d+)_pre([0-9]+)_p([0-9]+)',
- '%s.\\2_pre%s_p%s' % (llvm_major_version, svn_version,
- datetime.datetime.today().strftime('%Y%m%d')),
- ebuild,
- count=1)
- # any other package
- else:
- new_ebuild, is_changed = re.subn(r'(\d+)\.(\d+)_pre([0-9]+)',
- '%s.\\2_pre%s' %
- (llvm_major_version, svn_version),
- ebuild,
- count=1)
-
- if not is_changed: # failed to increment the revision number
- raise ValueError('Failed to uprev the ebuild.')
-
- symlink_dir = os.path.dirname(symlink)
-
- # Rename the ebuild
- subprocess.check_output(['git', '-C', symlink_dir, 'mv', ebuild, new_ebuild])
-
- # Create a symlink of the renamed ebuild
- new_symlink = new_ebuild[:-len('.ebuild')] + '-r1.ebuild'
- subprocess.check_output(['ln', '-s', '-r', new_ebuild, new_symlink])
-
- if not os.path.islink(new_symlink):
- raise ValueError('Invalid symlink name: %s' % new_ebuild[:-len('.ebuild')])
-
- subprocess.check_output(['git', '-C', symlink_dir, 'add', new_symlink])
-
- # Remove the old symlink
- subprocess.check_output(['git', '-C', symlink_dir, 'rm', symlink])
-
-
-def CreatePathDictionaryFromPackages(chroot_path, update_packages):
- """Creates a symlink and ebuild path pair dictionary from the packages.
-
- Args:
- chroot_path: The absolute path to the chroot.
- update_packages: The filtered packages to be updated.
-
- Returns:
- A dictionary where the key is the absolute path to the symlink
- of the package and the value is the absolute path to the ebuild of
- the package.
- """
-
- # Construct a list containing the chroot file paths of the package(s).
- chroot_file_paths = chroot.GetChrootEbuildPaths(chroot_path, update_packages)
-
- # Construct a list containing the symlink(s) of the package(s).
- symlink_file_paths = chroot.ConvertChrootPathsToAbsolutePaths(
- chroot_path, chroot_file_paths)
-
- # Create a dictionary where the key is the absolute path of the symlink to
- # the package and the value is the absolute path to the ebuild of the package.
- return GetEbuildPathsFromSymLinkPaths(symlink_file_paths)
-
-
-def RemovePatchesFromFilesDir(patches):
- """Removes the patches from $FILESDIR of a package.
+ """Uprevs the ebuild's revision number.
+
+ Increases the revision number by 1 and stages the change in
+ the temporary repo.
+
+ Args:
+ symlink: The absolute path of an ebuild symlink.
+ svn_version: The SVN-style revision number of git_hash.
+ git_hash: The new git hash.
+
+ Raises:
+ ValueError: Failed to uprev the ebuild or failed to stage the changes.
+ AssertionError: No llvm version provided for an LLVM uprev
+ """
+
+ if not os.path.islink(symlink):
+ raise ValueError(f"Invalid symlink provided: {symlink}")
+
+ ebuild = os.path.realpath(symlink)
+ llvm_major_version = get_llvm_hash.GetLLVMMajorVersion(git_hash)
+ # llvm
+ package = os.path.basename(os.path.dirname(symlink))
+ if not package:
+ raise ValueError("Tried to uprev an unknown package")
+ if package == "llvm":
+ new_ebuild, is_changed = re.subn(
+ r"(\d+)\.(\d+)_pre([0-9]+)_p([0-9]+)",
+ "%s.\\2_pre%s_p%s"
+ % (
+ llvm_major_version,
+ svn_version,
+ datetime.datetime.today().strftime("%Y%m%d"),
+ ),
+ ebuild,
+ count=1,
+ )
+ # any other package
+ else:
+ new_ebuild, is_changed = re.subn(
+ r"(\d+)\.(\d+)_pre([0-9]+)",
+ "%s.\\2_pre%s" % (llvm_major_version, svn_version),
+ ebuild,
+ count=1,
+ )
- Args:
- patches: A list of absolute pathes of patches to remove
+ if not is_changed: # failed to increment the revision number
+ raise ValueError("Failed to uprev the ebuild.")
- Raises:
- ValueError: Failed to remove a patch in $FILESDIR.
- """
+ symlink_dir = os.path.dirname(symlink)
- for patch in patches:
+ # Rename the ebuild
subprocess.check_output(
- ['git', '-C', os.path.dirname(patch), 'rm', '-f', patch])
-
-
-def StagePatchMetadataFileForCommit(patch_metadata_file_path):
- """Stages the updated patch metadata file for commit.
+ ["git", "-C", symlink_dir, "mv", ebuild, new_ebuild]
+ )
- Args:
- patch_metadata_file_path: The absolute path to the patch metadata file.
+ # Create a symlink of the renamed ebuild
+ new_symlink = new_ebuild[: -len(".ebuild")] + "-r1.ebuild"
+ subprocess.check_output(["ln", "-s", "-r", new_ebuild, new_symlink])
- Raises:
- ValueError: Failed to stage the patch metadata file for commit or invalid
- patch metadata file.
- """
+ if not os.path.islink(new_symlink):
+ raise ValueError(
+ f'Invalid symlink name: {new_ebuild[:-len(".ebuild")]}'
+ )
- if not os.path.isfile(patch_metadata_file_path):
- raise ValueError('Invalid patch metadata file provided: %s' %
- patch_metadata_file_path)
+ subprocess.check_output(["git", "-C", symlink_dir, "add", new_symlink])
- # Cmd to stage the patch metadata file for commit.
- subprocess.check_output([
- 'git', '-C',
- os.path.dirname(patch_metadata_file_path), 'add',
- patch_metadata_file_path
- ])
+ # Remove the old symlink
+ subprocess.check_output(["git", "-C", symlink_dir, "rm", symlink])
-def StagePackagesPatchResultsForCommit(package_info_dict, commit_messages):
- """Stages the patch results of the packages to the commit message.
-
- Args:
- package_info_dict: A dictionary where the key is the package name and the
- value is a dictionary that contains information about the patches of the
- package (key).
- commit_messages: The commit message that has the updated ebuilds and
- upreving information.
-
- Returns:
- commit_messages with new additions
- """
-
- # For each package, check if any patches for that package have
- # changed, if so, add which patches have changed to the commit
- # message.
- for package_name, patch_info_dict in package_info_dict.items():
- if (patch_info_dict['disabled_patches']
- or patch_info_dict['removed_patches']
- or patch_info_dict['modified_metadata']):
- cur_package_header = '\nFor the package %s:' % package_name
- commit_messages.append(cur_package_header)
-
- # Add to the commit message that the patch metadata file was modified.
- if patch_info_dict['modified_metadata']:
- patch_metadata_path = patch_info_dict['modified_metadata']
- commit_messages.append('The patch metadata file %s was modified' %
- os.path.basename(patch_metadata_path))
-
- StagePatchMetadataFileForCommit(patch_metadata_path)
-
- # Add each disabled patch to the commit message.
- if patch_info_dict['disabled_patches']:
- commit_messages.append('The following patches were disabled:')
-
- for patch_path in patch_info_dict['disabled_patches']:
- commit_messages.append(os.path.basename(patch_path))
-
- # Add each removed patch to the commit message.
- if patch_info_dict['removed_patches']:
- commit_messages.append('The following patches were removed:')
-
- for patch_path in patch_info_dict['removed_patches']:
- commit_messages.append(os.path.basename(patch_path))
-
- RemovePatchesFromFilesDir(patch_info_dict['removed_patches'])
-
- return commit_messages
-
-
-def UpdatePackages(packages, llvm_variant, git_hash, svn_version, chroot_path,
- patch_metadata_file, mode, git_hash_source,
- extra_commit_msg):
- """Updates an LLVM hash and uprevs the ebuild of the packages.
-
- A temporary repo is created for the changes. The changes are
- then uploaded for review.
-
- Args:
- packages: A list of all the packages that are going to be updated.
- llvm_variant: The LLVM hash to update.
- git_hash: The new git hash.
- svn_version: The SVN-style revision number of git_hash.
- chroot_path: The absolute path to the chroot.
- patch_metadata_file: The name of the .json file in '$FILESDIR/' that has
- the patches and its metadata.
- mode: The mode of the patch manager when handling an applicable patch
- that failed to apply.
- Ex. 'FailureModes.FAIL'
- git_hash_source: The source of which git hash to use based off of.
- Ex. 'google3', 'tot', or <version> such as 365123
- extra_commit_msg: extra test to append to the commit message.
-
- Returns:
- A nametuple that has two (key, value) pairs, where the first pair is the
- Gerrit commit URL and the second pair is the change list number.
- """
-
- # Determines whether to print the result of each executed command.
- llvm_patch_management.verbose = verbose
-
- # Construct a dictionary where the key is the absolute path of the symlink to
- # the package and the value is the absolute path to the ebuild of the package.
- paths_dict = CreatePathDictionaryFromPackages(chroot_path, packages)
-
- repo_path = os.path.dirname(next(iter(paths_dict.values())))
-
- branch = 'update-' + llvm_variant.value + '-' + git_hash
-
- git.CreateBranch(repo_path, branch)
-
- try:
- commit_message_header = 'llvm'
- if llvm_variant == LLVMVariant.next:
- commit_message_header = 'llvm-next'
- if git_hash_source in get_llvm_hash.KNOWN_HASH_SOURCES:
- commit_message_header += ('/%s: upgrade to %s (r%d)' %
- (git_hash_source, git_hash, svn_version))
- else:
- commit_message_header += (': upgrade to %s (r%d)' %
- (git_hash, svn_version))
-
- commit_messages = [
- commit_message_header + '\n',
- 'The following packages have been updated:',
- ]
+def CreatePathDictionaryFromPackages(chroot_path, update_packages):
+ """Creates a symlink and ebuild path pair dictionary from the packages.
- # Holds the list of packages that are updating.
- packages = []
+ Args:
+ chroot_path: The absolute path to the chroot.
+ update_packages: The filtered packages to be updated.
- # Iterate through the dictionary.
- #
- # For each iteration:
- # 1) Update the ebuild's LLVM hash.
- # 2) Uprev the ebuild (symlink).
- # 3) Add the modified package to the commit message.
- for symlink_path, ebuild_path in paths_dict.items():
- path_to_ebuild_dir = os.path.dirname(ebuild_path)
+ Returns:
+ A dictionary where the key is the absolute path to the symlink
+ of the package and the value is the absolute path to the ebuild of
+ the package.
+ """
- UpdateEbuildLLVMHash(ebuild_path, llvm_variant, git_hash, svn_version)
+ # Construct a list containing the chroot file paths of the package(s).
+ chroot_file_paths = chroot.GetChrootEbuildPaths(
+ chroot_path, update_packages
+ )
- if llvm_variant == LLVMVariant.current:
- UprevEbuildToVersion(symlink_path, svn_version, git_hash)
- else:
- UprevEbuildSymlink(symlink_path)
+ # Construct a list containing the symlink(s) of the package(s).
+ symlink_file_paths = chroot.ConvertChrootPathsToAbsolutePaths(
+ chroot_path, chroot_file_paths
+ )
- cur_dir_name = os.path.basename(path_to_ebuild_dir)
- parent_dir_name = os.path.basename(os.path.dirname(path_to_ebuild_dir))
+ # Create a dictionary where the key is the absolute path of the symlink to
+ # the package and the value is the absolute path to the ebuild of the package.
+ return GetEbuildPathsFromSymLinkPaths(symlink_file_paths)
- packages.append('%s/%s' % (parent_dir_name, cur_dir_name))
- commit_messages.append('%s/%s' % (parent_dir_name, cur_dir_name))
- EnsurePackageMaskContains(chroot_path, git_hash)
+def RemovePatchesFromFilesDir(patches):
+ """Removes the patches from $FILESDIR of a package.
- # Handle the patches for each package.
- package_info_dict = llvm_patch_management.UpdatePackagesPatchMetadataFile(
- chroot_path, svn_version, patch_metadata_file, packages, mode)
+ Args:
+ patches: A list of absolute paths of patches to remove
- # Update the commit message if changes were made to a package's patches.
- commit_messages = StagePackagesPatchResultsForCommit(
- package_info_dict, commit_messages)
+ Raises:
+ ValueError: Failed to remove a patch in $FILESDIR.
+ """
- if extra_commit_msg:
- commit_messages.append(extra_commit_msg)
+ for patch in patches:
+ subprocess.check_output(
+ ["git", "-C", os.path.dirname(patch), "rm", "-f", patch]
+ )
- change_list = git.UploadChanges(repo_path, branch, commit_messages)
- finally:
- git.DeleteBranch(repo_path, branch)
+def StagePatchMetadataFileForCommit(patch_metadata_file_path):
+ """Stages the updated patch metadata file for commit.
- return change_list
+ Args:
+ patch_metadata_file_path: The absolute path to the patch metadata file.
+ Raises:
+ ValueError: Failed to stage the patch metadata file for commit or invalid
+ patch metadata file.
+ """
-def EnsurePackageMaskContains(chroot_path, git_hash):
- """Adds the major version of llvm to package.mask if it's not already present.
+ if not os.path.isfile(patch_metadata_file_path):
+ raise ValueError(
+ f"Invalid patch metadata file provided: {patch_metadata_file_path}"
+ )
- Args:
- chroot_path: The absolute path to the chroot.
- git_hash: The new git hash.
+ # Cmd to stage the patch metadata file for commit.
+ subprocess.check_output(
+ [
+ "git",
+ "-C",
+ os.path.dirname(patch_metadata_file_path),
+ "add",
+ patch_metadata_file_path,
+ ]
+ )
- Raises:
- FileExistsError: package.mask not found in ../../chromiumos-overlay
- """
- llvm_major_version = get_llvm_hash.GetLLVMMajorVersion(git_hash)
+def StagePackagesPatchResultsForCommit(package_info_dict, commit_messages):
+ """Stages the patch results of the packages to the commit message.
+
+ Args:
+ package_info_dict: A dictionary where the key is the package name and the
+ value is a dictionary that contains information about the patches of the
+ package (key).
+ commit_messages: The commit message that has the updated ebuilds and
+ upreving information.
+
+ Returns:
+ commit_messages with new additions
+ """
+
+ # For each package, check if any patches for that package have
+ # changed; if so, add which patches have changed to the commit
+ # message.
+ for package_name, patch_info_dict in package_info_dict.items():
+ if (
+ patch_info_dict["disabled_patches"]
+ or patch_info_dict["removed_patches"]
+ or patch_info_dict["modified_metadata"]
+ ):
+ cur_package_header = f"\nFor the package {package_name}:"
+ commit_messages.append(cur_package_header)
+
+ # Add to the commit message that the patch metadata file was modified.
+ if patch_info_dict["modified_metadata"]:
+ patch_metadata_path = patch_info_dict["modified_metadata"]
+ metadata_file_name = os.path.basename(patch_metadata_path)
+ commit_messages.append(
+ f"The patch metadata file {metadata_file_name} was modified"
+ )
+
+ StagePatchMetadataFileForCommit(patch_metadata_path)
+
+ # Add each disabled patch to the commit message.
+ if patch_info_dict["disabled_patches"]:
+ commit_messages.append("The following patches were disabled:")
+
+ for patch_path in patch_info_dict["disabled_patches"]:
+ commit_messages.append(os.path.basename(patch_path))
+
+ # Add each removed patch to the commit message.
+ if patch_info_dict["removed_patches"]:
+ commit_messages.append("The following patches were removed:")
+
+ for patch_path in patch_info_dict["removed_patches"]:
+ commit_messages.append(os.path.basename(patch_path))
+
+ RemovePatchesFromFilesDir(patch_info_dict["removed_patches"])
+
+ return commit_messages
+
+
+def UpdateManifests(packages: Iterable[str], chroot_path: Path):
+ """Updates manifest files for packages.
+
+ Args:
+ packages: A list of packages to update manifests for.
+ chroot_path: The absolute path to the chroot.
+
+ Raises:
+ CalledProcessError: ebuild failed to update manifest.
+ """
+ manifest_ebuilds = chroot.GetChrootEbuildPaths(chroot_path, packages)
+ for ebuild_path in manifest_ebuilds:
+ subprocess_helpers.ChrootRunCommand(
+ chroot_path, ["ebuild", ebuild_path, "manifest"]
+ )
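+# Usage sketch (hypothetical path):
+#   UpdateManifests(["sys-devel/llvm"], Path("/home/user/chromiumos"))
+# runs `ebuild <ebuild path> manifest` inside the chroot for each package.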
+
+
+def UpdatePackages(
+ packages: Iterable[str],
+ manifest_packages: Iterable[str],
+ llvm_variant,
+ git_hash,
+ svn_version,
+ chroot_path: Path,
+ mode,
+ git_hash_source,
+ extra_commit_msg,
+):
+ """Updates an LLVM hash and uprevs the ebuild of the packages.
+
+ A temporary repo is created for the changes. The changes are
+ then uploaded for review.
+
+ Args:
+ packages: A list of all the packages that are going to be updated.
+ manifest_packages: A list of packages to update manifests for.
+ llvm_variant: The LLVM hash to update.
+ git_hash: The new git hash.
+ svn_version: The SVN-style revision number of git_hash.
+ chroot_path: The absolute path to the chroot.
+ mode: The mode of the patch manager when handling an applicable patch
+ that failed to apply.
+ Ex. 'FailureModes.FAIL'
+      git_hash_source: The source the git hash was obtained from.
+ Ex. 'google3', 'tot', or <version> such as 365123
+      extra_commit_msg: Extra text to append to the commit message.
+
+ Returns:
+      A namedtuple with two fields, where the first is the Gerrit commit URL
+      and the second is the change list number.
+ """
+
+ # Construct a dictionary where the key is the absolute path of the symlink to
+ # the package and the value is the absolute path to the ebuild of the package.
+ paths_dict = CreatePathDictionaryFromPackages(chroot_path, packages)
+
+ repo_path = os.path.dirname(next(iter(paths_dict.values())))
+
+ branch = "update-" + llvm_variant.value + "-" + git_hash
+
+ git.CreateBranch(repo_path, branch)
+
+ try:
+ commit_message_header = "llvm"
+ if llvm_variant == LLVMVariant.next:
+ commit_message_header = "llvm-next"
+ if git_hash_source in get_llvm_hash.KNOWN_HASH_SOURCES:
+ commit_message_header += (
+ f"/{git_hash_source}: upgrade to {git_hash} (r{svn_version})"
+ )
+ else:
+ commit_message_header += f": upgrade to {git_hash} (r{svn_version})"
+
+ commit_lines = [
+ commit_message_header + "\n",
+ "The following packages have been updated:",
+ ]
+
+        # Holds the list of packages that are being updated.
+ packages = []
+
+ # Iterate through the dictionary.
+ #
+ # For each iteration:
+ # 1) Update the ebuild's LLVM hash.
+ # 2) Uprev the ebuild (symlink).
+ # 3) Add the modified package to the commit message.
+ for symlink_path, ebuild_path in paths_dict.items():
+ path_to_ebuild_dir = os.path.dirname(ebuild_path)
+
+ UpdateEbuildLLVMHash(
+ ebuild_path, llvm_variant, git_hash, svn_version
+ )
+
+ if llvm_variant == LLVMVariant.current:
+ UprevEbuildToVersion(symlink_path, svn_version, git_hash)
+ else:
+ UprevEbuildSymlink(symlink_path)
+
+ cur_dir_name = os.path.basename(path_to_ebuild_dir)
+ parent_dir_name = os.path.basename(
+ os.path.dirname(path_to_ebuild_dir)
+ )
+
+ packages.append(f"{parent_dir_name}/{cur_dir_name}")
+ commit_lines.append(f"{parent_dir_name}/{cur_dir_name}")
+
+ if manifest_packages:
+ UpdateManifests(manifest_packages, chroot_path)
+ commit_lines.append("Updated manifest for:")
+ commit_lines.extend(manifest_packages)
+
+ EnsurePackageMaskContains(chroot_path, git_hash)
+
+ # Handle the patches for each package.
+ package_info_dict = UpdatePackagesPatchMetadataFile(
+ chroot_path, svn_version, packages, mode
+ )
+
+ # Update the commit message if changes were made to a package's patches.
+ commit_lines = StagePackagesPatchResultsForCommit(
+ package_info_dict, commit_lines
+ )
+
+ if extra_commit_msg:
+ commit_lines.append(extra_commit_msg)
+
+ change_list = git.UploadChanges(repo_path, branch, commit_lines)
+
+ finally:
+ git.DeleteBranch(repo_path, branch)
+
+ return change_list
- overlay_dir = os.path.join(chroot_path, 'src/third_party/chromiumos-overlay')
- mask_path = os.path.join(overlay_dir,
- 'profiles/targets/chromeos/package.mask')
- with open(mask_path, 'r+') as mask_file:
- mask_contents = mask_file.read()
- expected_line = '=sys-devel/llvm-%s.0_pre*\n' % llvm_major_version
- if expected_line not in mask_contents:
- mask_file.write(expected_line)
- subprocess.check_output(['git', '-C', overlay_dir, 'add', mask_path])
+def EnsurePackageMaskContains(chroot_path, git_hash):
+ """Adds the major version of llvm to package.mask if it's not already present.
+
+ Args:
+ chroot_path: The absolute path to the chroot.
+ git_hash: The new git hash.
+
+ Raises:
+ FileExistsError: package.mask not found in ../../chromiumos-overlay
+ """
+
+ llvm_major_version = get_llvm_hash.GetLLVMMajorVersion(git_hash)
+
+ overlay_dir = os.path.join(
+ chroot_path, "src/third_party/chromiumos-overlay"
+ )
+ mask_path = os.path.join(
+ overlay_dir, "profiles/targets/chromeos/package.mask"
+ )
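+    # For example, when the major version of git_hash is 1234, the line
+    # ensured below is "=sys-devel/llvm-1234.0_pre*" (as in the unit tests).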
+ with open(mask_path, "r+") as mask_file:
+ mask_contents = mask_file.read()
+ expected_line = f"=sys-devel/llvm-{llvm_major_version}.0_pre*\n"
+ if expected_line not in mask_contents:
+ mask_file.write(expected_line)
+
+ subprocess.check_output(["git", "-C", overlay_dir, "add", mask_path])
+
+
+def UpdatePackagesPatchMetadataFile(
+ chroot_path: Path,
+ svn_version: int,
+ packages: Iterable[str],
+ mode: failure_modes.FailureModes,
+) -> Dict[str, patch_utils.PatchInfo]:
+ """Updates the packages metadata file.
+
+ Args:
+ chroot_path: The absolute path to the chroot.
+ svn_version: The version to use for patch management.
+ packages: All the packages to update their patch metadata file.
+ mode: The mode for the patch manager to use when an applicable patch
+ fails to apply.
+ Ex: 'FailureModes.FAIL'
+
+ Returns:
+ A dictionary where the key is the package name and the value is a dictionary
+ that has information on the patches.
+ """
+
+ # A dictionary where the key is the package name and the value is a dictionary
+ # that has information on the patches.
+ package_info = {}
+
+ llvm_hash = get_llvm_hash.LLVMHash()
+
+ with llvm_hash.CreateTempDirectory() as temp_dir:
+ with get_llvm_hash.CreateTempLLVMRepo(temp_dir) as dirname:
+            # Ensure that 'svn_version' exists in the ChromiumOS mirror of LLVM by
+ # finding its corresponding git hash.
+ git_hash = get_llvm_hash.GetGitHashFrom(dirname, svn_version)
+ move_head_cmd = ["git", "-C", dirname, "checkout", git_hash, "-q"]
+ subprocess.run(move_head_cmd, stdout=subprocess.DEVNULL, check=True)
+
+ for cur_package in packages:
+ # Get the absolute path to $FILESDIR of the package.
+ chroot_ebuild_str = subprocess_helpers.ChrootRunCommand(
+ chroot_path, ["equery", "w", cur_package]
+ ).strip()
+ if not chroot_ebuild_str:
+ raise RuntimeError(
+ f"could not find ebuild for {cur_package}"
+ )
+ chroot_ebuild_path = Path(
+ chroot.ConvertChrootPathsToAbsolutePaths(
+ chroot_path, [chroot_ebuild_str]
+ )[0]
+ )
+ patches_json_fp = (
+ chroot_ebuild_path.parent / "files" / "PATCHES.json"
+ )
+ if not patches_json_fp.is_file():
+ raise RuntimeError(
+ f"patches file {patches_json_fp} is not a file"
+ )
+
+ src_path = Path(dirname)
+ with patch_utils.git_clean_context(src_path):
+ if (
+ mode == failure_modes.FailureModes.FAIL
+ or mode == failure_modes.FailureModes.CONTINUE
+ ):
+ patches_info = patch_utils.apply_all_from_json(
+ svn_version=svn_version,
+ llvm_src_dir=src_path,
+ patches_json_fp=patches_json_fp,
+ continue_on_failure=mode
+ == failure_modes.FailureModes.CONTINUE,
+ )
+ elif mode == failure_modes.FailureModes.REMOVE_PATCHES:
+ patches_info = patch_utils.remove_old_patches(
+ svn_version, src_path, patches_json_fp
+ )
+ elif mode == failure_modes.FailureModes.DISABLE_PATCHES:
+ patches_info = patch_utils.update_version_ranges(
+ svn_version, src_path, patches_json_fp
+ )
+
+ package_info[cur_package] = patches_info._asdict()
+
+ return package_info
def main():
- """Updates the LLVM next hash for each package.
-
- Raises:
- AssertionError: The script was run inside the chroot.
- """
-
- chroot.VerifyOutsideChroot()
-
- args_output = GetCommandLineArgs()
-
- llvm_variant = LLVMVariant.current
- if args_output.is_llvm_next:
- llvm_variant = LLVMVariant.next
-
- git_hash_source = args_output.llvm_version
-
- git_hash, svn_version = get_llvm_hash.GetLLVMHashAndVersionFromSVNOption(
- git_hash_source)
-
- change_list = UpdatePackages(args_output.update_packages,
- llvm_variant,
- git_hash,
- svn_version,
- args_output.chroot_path,
- args_output.patch_metadata_file,
- failure_modes.FailureModes(
- args_output.failure_mode),
- git_hash_source,
- extra_commit_msg=None)
-
- print('Successfully updated packages to %s (%d)' % (git_hash, svn_version))
- print('Gerrit URL: %s' % change_list.url)
- print('Change list number: %d' % change_list.cl_number)
-
-
-if __name__ == '__main__':
- main()
+ """Updates the LLVM next hash for each package.
+
+ Raises:
+ AssertionError: The script was run inside the chroot.
+ """
+
+ chroot.VerifyOutsideChroot()
+
+ args_output = GetCommandLineArgs()
+
+ llvm_variant = LLVMVariant.current
+ if args_output.is_llvm_next:
+ llvm_variant = LLVMVariant.next
+
+ git_hash_source = args_output.llvm_version
+
+ git_hash, svn_version = get_llvm_hash.GetLLVMHashAndVersionFromSVNOption(
+ git_hash_source
+ )
+
+    # Filter out empty strings. For example, "".split(",") returns [""].
+ packages = set(p for p in args_output.update_packages.split(",") if p)
+ manifest_packages = set(
+ p for p in args_output.manifest_packages.split(",") if p
+ )
+ if not manifest_packages and not args_output.is_llvm_next:
+ # Set default manifest packages only for the current llvm.
+ manifest_packages = set(DEFAULT_MANIFEST_PACKAGES)
+ change_list = UpdatePackages(
+ packages=packages,
+ manifest_packages=manifest_packages,
+ llvm_variant=llvm_variant,
+ git_hash=git_hash,
+ svn_version=svn_version,
+ chroot_path=args_output.chroot_path,
+ mode=failure_modes.FailureModes(args_output.failure_mode),
+ git_hash_source=git_hash_source,
+ extra_commit_msg=None,
+ )
+
+ print(f"Successfully updated packages to {git_hash} ({svn_version})")
+ print(f"Gerrit URL: {change_list.url}")
+ print(f"Change list number: {change_list.cl_number}")
+
+
+if __name__ == "__main__":
+ main()
diff --git a/llvm_tools/update_chromeos_llvm_hash_unittest.py b/llvm_tools/update_chromeos_llvm_hash_unittest.py
index adb20598..b758538c 100755
--- a/llvm_tools/update_chromeos_llvm_hash_unittest.py
+++ b/llvm_tools/update_chromeos_llvm_hash_unittest.py
@@ -1,18 +1,18 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
-# Copyright 2019 The Chromium OS Authors. All rights reserved.
+# Copyright 2019 The ChromiumOS Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Unit tests for updating LLVM hashes."""
-from __future__ import print_function
import collections
import datetime
import os
-import re
+from pathlib import Path
import subprocess
+import sys
import unittest
import unittest.mock as mock
@@ -20,865 +20,1185 @@ import chroot
import failure_modes
import get_llvm_hash
import git
-import llvm_patch_management
import test_helpers
import update_chromeos_llvm_hash
+
# These are unittests; protected access is OK to a point.
# pylint: disable=protected-access
class UpdateLLVMHashTest(unittest.TestCase):
- """Test class for updating LLVM hashes of packages."""
-
- @mock.patch.object(os.path, 'realpath')
- def testDefaultCrosRootFromCrOSCheckout(self, mock_llvm_tools):
- llvm_tools_path = '/path/to/cros/src/third_party/toolchain-utils/llvm_tools'
- mock_llvm_tools.return_value = llvm_tools_path
- self.assertEqual(update_chromeos_llvm_hash.defaultCrosRoot(),
- '%s/../../../../' % llvm_tools_path)
-
- @mock.patch.object(os.path, 'realpath')
- def testDefaultCrosRootFromOutsideCrOSCheckout(self, mock_llvm_tools):
- mock_llvm_tools.return_value = '~/toolchain-utils/llvm_tools'
- self.assertEqual(update_chromeos_llvm_hash.defaultCrosRoot(),
- '~/chromiumos')
-
- # Simulate behavior of 'os.path.isfile()' when the ebuild path to a package
- # does not exist.
- @mock.patch.object(os.path, 'isfile', return_value=False)
- def testFailedToUpdateLLVMHashForInvalidEbuildPath(self, mock_isfile):
- ebuild_path = '/some/path/to/package.ebuild'
- llvm_variant = update_chromeos_llvm_hash.LLVMVariant.current
- git_hash = 'a123testhash1'
- svn_version = 1000
-
- # Verify the exception is raised when the ebuild path does not exist.
- with self.assertRaises(ValueError) as err:
- update_chromeos_llvm_hash.UpdateEbuildLLVMHash(ebuild_path, llvm_variant,
- git_hash, svn_version)
-
- self.assertEqual(
- str(err.exception), 'Invalid ebuild path provided: %s' % ebuild_path)
-
- mock_isfile.assert_called_once()
-
- # Simulate 'os.path.isfile' behavior on a valid ebuild path.
- @mock.patch.object(os.path, 'isfile', return_value=True)
- def testFailedToUpdateLLVMHash(self, mock_isfile):
- # Create a temporary file to simulate an ebuild file of a package.
- with test_helpers.CreateTemporaryJsonFile() as ebuild_file:
- with open(ebuild_file, 'w') as f:
- f.write('\n'.join([
- 'First line in the ebuild', 'Second line in the ebuild',
- 'Last line in the ebuild'
- ]))
-
- llvm_variant = update_chromeos_llvm_hash.LLVMVariant.current
- git_hash = 'a123testhash1'
- svn_version = 1000
-
- # Verify the exception is raised when the ebuild file does not have
- # 'LLVM_HASH'.
- with self.assertRaises(ValueError) as err:
- update_chromeos_llvm_hash.UpdateEbuildLLVMHash(ebuild_file,
- llvm_variant, git_hash,
- svn_version)
-
- self.assertEqual(str(err.exception), 'Failed to update LLVM_HASH')
-
- llvm_variant = update_chromeos_llvm_hash.LLVMVariant.next
-
- self.assertEqual(mock_isfile.call_count, 2)
-
- # Simulate 'os.path.isfile' behavior on a valid ebuild path.
- @mock.patch.object(os.path, 'isfile', return_value=True)
- def testFailedToUpdateLLVMNextHash(self, mock_isfile):
- # Create a temporary file to simulate an ebuild file of a package.
- with test_helpers.CreateTemporaryJsonFile() as ebuild_file:
- with open(ebuild_file, 'w') as f:
- f.write('\n'.join([
- 'First line in the ebuild', 'Second line in the ebuild',
- 'Last line in the ebuild'
- ]))
-
- llvm_variant = update_chromeos_llvm_hash.LLVMVariant.next
- git_hash = 'a123testhash1'
- svn_version = 1000
-
- # Verify the exception is raised when the ebuild file does not have
- # 'LLVM_NEXT_HASH'.
- with self.assertRaises(ValueError) as err:
- update_chromeos_llvm_hash.UpdateEbuildLLVMHash(ebuild_file,
- llvm_variant, git_hash,
- svn_version)
-
- self.assertEqual(str(err.exception), 'Failed to update LLVM_NEXT_HASH')
-
- self.assertEqual(mock_isfile.call_count, 2)
-
- @mock.patch.object(os.path, 'isfile', return_value=True)
- @mock.patch.object(subprocess, 'check_output', return_value=None)
- def testSuccessfullyStageTheEbuildForCommitForLLVMHashUpdate(
- self, mock_stage_commit_command, mock_isfile):
-
- # Create a temporary file to simulate an ebuild file of a package.
- with test_helpers.CreateTemporaryJsonFile() as ebuild_file:
- # Updates LLVM_HASH to 'git_hash' and revision to
- # 'svn_version'.
- llvm_variant = update_chromeos_llvm_hash.LLVMVariant.current
- git_hash = 'a123testhash1'
- svn_version = 1000
-
- with open(ebuild_file, 'w') as f:
- f.write('\n'.join([
- 'First line in the ebuild', 'Second line in the ebuild',
- 'LLVM_HASH=\"a12b34c56d78e90\" # r500', 'Last line in the ebuild'
- ]))
-
- update_chromeos_llvm_hash.UpdateEbuildLLVMHash(ebuild_file, llvm_variant,
- git_hash, svn_version)
-
- expected_file_contents = [
- 'First line in the ebuild\n', 'Second line in the ebuild\n',
- 'LLVM_HASH=\"a123testhash1\" # r1000\n', 'Last line in the ebuild'
- ]
-
- # Verify the new file contents of the ebuild file match the expected file
- # contents.
- with open(ebuild_file) as new_file:
- file_contents_as_a_list = [cur_line for cur_line in new_file]
- self.assertListEqual(file_contents_as_a_list, expected_file_contents)
-
- self.assertEqual(mock_isfile.call_count, 2)
-
- mock_stage_commit_command.assert_called_once()
-
- @mock.patch.object(os.path, 'isfile', return_value=True)
- @mock.patch.object(subprocess, 'check_output', return_value=None)
- def testSuccessfullyStageTheEbuildForCommitForLLVMNextHashUpdate(
- self, mock_stage_commit_command, mock_isfile):
-
- # Create a temporary file to simulate an ebuild file of a package.
- with test_helpers.CreateTemporaryJsonFile() as ebuild_file:
- # Updates LLVM_NEXT_HASH to 'git_hash' and revision to
- # 'svn_version'.
- llvm_variant = update_chromeos_llvm_hash.LLVMVariant.next
- git_hash = 'a123testhash1'
- svn_version = 1000
-
- with open(ebuild_file, 'w') as f:
- f.write('\n'.join([
- 'First line in the ebuild', 'Second line in the ebuild',
- 'LLVM_NEXT_HASH=\"a12b34c56d78e90\" # r500',
- 'Last line in the ebuild'
- ]))
-
- update_chromeos_llvm_hash.UpdateEbuildLLVMHash(ebuild_file, llvm_variant,
- git_hash, svn_version)
-
- expected_file_contents = [
- 'First line in the ebuild\n', 'Second line in the ebuild\n',
- 'LLVM_NEXT_HASH=\"a123testhash1\" # r1000\n',
- 'Last line in the ebuild'
- ]
-
- # Verify the new file contents of the ebuild file match the expected file
- # contents.
- with open(ebuild_file) as new_file:
- file_contents_as_a_list = [cur_line for cur_line in new_file]
- self.assertListEqual(file_contents_as_a_list, expected_file_contents)
-
- self.assertEqual(mock_isfile.call_count, 2)
-
- mock_stage_commit_command.assert_called_once()
-
- @mock.patch.object(get_llvm_hash, 'GetLLVMMajorVersion')
- @mock.patch.object(os.path, 'islink', return_value=False)
- def testFailedToUprevEbuildToVersionForInvalidSymlink(self, mock_islink,
- mock_llvm_version):
- symlink_path = '/path/to/chroot/package/package.ebuild'
- svn_version = 1000
- git_hash = 'badf00d'
- mock_llvm_version.return_value = '1234'
-
- # Verify the exception is raised when a invalid symbolic link is passed in.
- with self.assertRaises(ValueError) as err:
- update_chromeos_llvm_hash.UprevEbuildToVersion(symlink_path, svn_version,
- git_hash)
-
- self.assertEqual(
- str(err.exception), 'Invalid symlink provided: %s' % symlink_path)
-
- mock_islink.assert_called_once()
- mock_llvm_version.assert_not_called()
-
- @mock.patch.object(os.path, 'islink', return_value=False)
- def testFailedToUprevEbuildSymlinkForInvalidSymlink(self, mock_islink):
- symlink_path = '/path/to/chroot/package/package.ebuild'
-
- # Verify the exception is raised when a invalid symbolic link is passed in.
- with self.assertRaises(ValueError) as err:
- update_chromeos_llvm_hash.UprevEbuildSymlink(symlink_path)
-
- self.assertEqual(
- str(err.exception), 'Invalid symlink provided: %s' % symlink_path)
-
- mock_islink.assert_called_once()
-
- @mock.patch.object(get_llvm_hash, 'GetLLVMMajorVersion')
- # Simulate 'os.path.islink' when a symbolic link is passed in.
- @mock.patch.object(os.path, 'islink', return_value=True)
- # Simulate 'os.path.realpath' when a symbolic link is passed in.
- @mock.patch.object(os.path, 'realpath', return_value=True)
- def testFailedToUprevEbuildToVersion(self, mock_realpath, mock_islink,
- mock_llvm_version):
- symlink_path = '/path/to/chroot/llvm/llvm_pre123_p.ebuild'
- mock_realpath.return_value = '/abs/path/to/llvm/llvm_pre123_p.ebuild'
- git_hash = 'badf00d'
- mock_llvm_version.return_value = '1234'
- svn_version = 1000
-
- # Verify the exception is raised when the symlink does not match the
- # expected pattern
- with self.assertRaises(ValueError) as err:
- update_chromeos_llvm_hash.UprevEbuildToVersion(symlink_path, svn_version,
- git_hash)
-
- self.assertEqual(str(err.exception), 'Failed to uprev the ebuild.')
-
- mock_llvm_version.assert_called_once_with(git_hash)
- mock_islink.assert_called_once_with(symlink_path)
-
- # Simulate 'os.path.islink' when a symbolic link is passed in.
- @mock.patch.object(os.path, 'islink', return_value=True)
- def testFailedToUprevEbuildSymlink(self, mock_islink):
- symlink_path = '/path/to/chroot/llvm/llvm_pre123_p.ebuild'
-
- # Verify the exception is raised when the symlink does not match the
- # expected pattern
- with self.assertRaises(ValueError) as err:
- update_chromeos_llvm_hash.UprevEbuildSymlink(symlink_path)
-
- self.assertEqual(str(err.exception), 'Failed to uprev the symlink.')
-
- mock_islink.assert_called_once_with(symlink_path)
-
- @mock.patch.object(get_llvm_hash, 'GetLLVMMajorVersion')
- @mock.patch.object(os.path, 'islink', return_value=True)
- @mock.patch.object(os.path, 'realpath')
- @mock.patch.object(subprocess, 'check_output', return_value=None)
- def testSuccessfullyUprevEbuildToVersionLLVM(self, mock_command_output,
- mock_realpath, mock_islink,
- mock_llvm_version):
- symlink = '/path/to/llvm/llvm-12.0_pre3_p2-r10.ebuild'
- ebuild = '/abs/path/to/llvm/llvm-12.0_pre3_p2.ebuild'
- mock_realpath.return_value = ebuild
- git_hash = 'badf00d'
- mock_llvm_version.return_value = '1234'
- svn_version = 1000
-
- update_chromeos_llvm_hash.UprevEbuildToVersion(symlink, svn_version,
- git_hash)
-
- mock_llvm_version.assert_called_once_with(git_hash)
-
- mock_islink.assert_called()
-
- mock_realpath.assert_called_once_with(symlink)
-
- mock_command_output.assert_called()
-
- # Verify commands
- symlink_dir = os.path.dirname(symlink)
- timestamp = datetime.datetime.today().strftime('%Y%m%d')
- new_ebuild = '/abs/path/to/llvm/llvm-1234.0_pre1000_p%s.ebuild' % timestamp
- new_symlink = new_ebuild[:-len('.ebuild')] + '-r1.ebuild'
-
- expected_cmd = ['git', '-C', symlink_dir, 'mv', ebuild, new_ebuild]
- self.assertEqual(mock_command_output.call_args_list[0],
- mock.call(expected_cmd))
-
- expected_cmd = ['ln', '-s', '-r', new_ebuild, new_symlink]
- self.assertEqual(mock_command_output.call_args_list[1],
- mock.call(expected_cmd))
-
- expected_cmd = ['git', '-C', symlink_dir, 'add', new_symlink]
- self.assertEqual(mock_command_output.call_args_list[2],
- mock.call(expected_cmd))
-
- expected_cmd = ['git', '-C', symlink_dir, 'rm', symlink]
- self.assertEqual(mock_command_output.call_args_list[3],
- mock.call(expected_cmd))
-
- @mock.patch.object(get_llvm_hash, 'GetLLVMMajorVersion')
- @mock.patch.object(os.path, 'islink', return_value=True)
- @mock.patch.object(os.path, 'realpath')
- @mock.patch.object(subprocess, 'check_output', return_value=None)
- def testSuccessfullyUprevEbuildToVersionNonLLVM(self, mock_command_output,
- mock_realpath, mock_islink,
- mock_llvm_version):
- symlink = '/abs/path/to/compiler-rt/compiler-rt-12.0_pre314159265-r4.ebuild'
- ebuild = '/abs/path/to/compiler-rt/compiler-rt-12.0_pre314159265.ebuild'
- mock_realpath.return_value = ebuild
- mock_llvm_version.return_value = '1234'
- svn_version = 1000
- git_hash = '5678'
-
- update_chromeos_llvm_hash.UprevEbuildToVersion(symlink, svn_version,
- git_hash)
-
- mock_islink.assert_called()
-
- mock_realpath.assert_called_once_with(symlink)
-
- mock_llvm_version.assert_called_once_with(git_hash)
-
- mock_command_output.assert_called()
-
- # Verify commands
- symlink_dir = os.path.dirname(symlink)
- new_ebuild = '/abs/path/to/compiler-rt/compiler-rt-1234.0_pre1000.ebuild'
- new_symlink = new_ebuild[:-len('.ebuild')] + '-r1.ebuild'
-
- expected_cmd = ['git', '-C', symlink_dir, 'mv', ebuild, new_ebuild]
- self.assertEqual(mock_command_output.call_args_list[0],
- mock.call(expected_cmd))
-
- expected_cmd = ['ln', '-s', '-r', new_ebuild, new_symlink]
- self.assertEqual(mock_command_output.call_args_list[1],
- mock.call(expected_cmd))
-
- expected_cmd = ['git', '-C', symlink_dir, 'add', new_symlink]
- self.assertEqual(mock_command_output.call_args_list[2],
- mock.call(expected_cmd))
-
- expected_cmd = ['git', '-C', symlink_dir, 'rm', symlink]
- self.assertEqual(mock_command_output.call_args_list[3],
- mock.call(expected_cmd))
-
- @mock.patch.object(os.path, 'islink', return_value=True)
- @mock.patch.object(subprocess, 'check_output', return_value=None)
- def testSuccessfullyUprevEbuildSymlink(self, mock_command_output,
- mock_islink):
- symlink_to_uprev = '/symlink/to/package-r1.ebuild'
-
- update_chromeos_llvm_hash.UprevEbuildSymlink(symlink_to_uprev)
-
- mock_islink.assert_called_once_with(symlink_to_uprev)
-
- mock_command_output.assert_called_once()
-
- # Simulate behavior of 'os.path.isdir()' when the path to the repo is not a
-
- # directory.
-
- @mock.patch.object(chroot, 'GetChrootEbuildPaths')
- @mock.patch.object(chroot, 'ConvertChrootPathsToAbsolutePaths')
- def testExceptionRaisedWhenCreatingPathDictionaryFromPackages(
- self, mock_chroot_paths_to_symlinks, mock_get_chroot_paths):
-
- chroot_path = '/some/path/to/chroot'
-
- package_name = 'test-pckg/package'
- package_chroot_path = '/some/chroot/path/to/package-r1.ebuild'
-
- # Test function to simulate 'ConvertChrootPathsToAbsolutePaths' when a
- # symlink does not start with the prefix '/mnt/host/source'.
- def BadPrefixChrootPath(*args):
- assert len(args) == 2
- raise ValueError('Invalid prefix for the chroot path: '
- '%s' % package_chroot_path)
-
- # Simulate 'GetChrootEbuildPaths' when valid packages are passed in.
- #
- # Returns a list of chroot paths.
- mock_get_chroot_paths.return_value = [package_chroot_path]
-
- # Use test function to simulate 'ConvertChrootPathsToAbsolutePaths'
- # behavior.
- mock_chroot_paths_to_symlinks.side_effect = BadPrefixChrootPath
-
- # Verify exception is raised when for an invalid prefix in the symlink.
- with self.assertRaises(ValueError) as err:
- update_chromeos_llvm_hash.CreatePathDictionaryFromPackages(
- chroot_path, [package_name])
-
- self.assertEqual(
- str(err.exception), 'Invalid prefix for the chroot path: '
- '%s' % package_chroot_path)
-
- mock_get_chroot_paths.assert_called_once_with(chroot_path, [package_name])
-
- mock_chroot_paths_to_symlinks.assert_called_once_with(
- chroot_path, [package_chroot_path])
-
- @mock.patch.object(chroot, 'GetChrootEbuildPaths')
- @mock.patch.object(chroot, 'ConvertChrootPathsToAbsolutePaths')
- @mock.patch.object(update_chromeos_llvm_hash,
- 'GetEbuildPathsFromSymLinkPaths')
- def testSuccessfullyCreatedPathDictionaryFromPackages(
- self, mock_ebuild_paths_from_symlink_paths, mock_chroot_paths_to_symlinks,
- mock_get_chroot_paths):
-
- package_chroot_path = '/mnt/host/source/src/path/to/package-r1.ebuild'
-
- # Simulate 'GetChrootEbuildPaths' when returning a chroot path for a valid
- # package.
- #
- # Returns a list of chroot paths.
- mock_get_chroot_paths.return_value = [package_chroot_path]
-
- package_symlink_path = '/some/path/to/chroot/src/path/to/package-r1.ebuild'
-
- # Simulate 'ConvertChrootPathsToAbsolutePaths' when returning a symlink to
- # a chroot path that points to a package.
- #
- # Returns a list of symlink file paths.
- mock_chroot_paths_to_symlinks.return_value = [package_symlink_path]
-
- chroot_package_path = '/some/path/to/chroot/src/path/to/package.ebuild'
-
- # Simulate 'GetEbuildPathsFromSymlinkPaths' when returning a dictionary of
- # a symlink that points to an ebuild.
- #
- # Returns a dictionary of a symlink and ebuild file path pair
- # where the key is the absolute path to the symlink of the ebuild file
- # and the value is the absolute path to the ebuild file of the package.
- mock_ebuild_paths_from_symlink_paths.return_value = {
- package_symlink_path: chroot_package_path
- }
-
- chroot_path = '/some/path/to/chroot'
- package_name = 'test-pckg/package'
-
- self.assertEqual(
- update_chromeos_llvm_hash.CreatePathDictionaryFromPackages(
- chroot_path, [package_name]),
- {package_symlink_path: chroot_package_path})
-
- mock_get_chroot_paths.assert_called_once_with(chroot_path, [package_name])
-
- mock_chroot_paths_to_symlinks.assert_called_once_with(
- chroot_path, [package_chroot_path])
-
- mock_ebuild_paths_from_symlink_paths.assert_called_once_with(
- [package_symlink_path])
-
- @mock.patch.object(subprocess, 'check_output', return_value=None)
- def testSuccessfullyRemovedPatchesFromFilesDir(self, mock_run_cmd):
- patches_to_remove_list = [
- '/abs/path/to/filesdir/cherry/fix_output.patch',
- '/abs/path/to/filesdir/display_results.patch'
- ]
-
- update_chromeos_llvm_hash.RemovePatchesFromFilesDir(patches_to_remove_list)
-
- self.assertEqual(mock_run_cmd.call_count, 2)
-
- @mock.patch.object(os.path, 'isfile', return_value=False)
- def testInvalidPatchMetadataFileStagedForCommit(self, mock_isfile):
- patch_metadata_path = '/abs/path/to/filesdir/PATCHES'
-
- # Verify the exception is raised when the absolute path to the patch
- # metadata file does not exist or is not a file.
- with self.assertRaises(ValueError) as err:
- update_chromeos_llvm_hash.StagePatchMetadataFileForCommit(
- patch_metadata_path)
-
- self.assertEqual(
- str(err.exception), 'Invalid patch metadata file provided: '
- '%s' % patch_metadata_path)
-
- mock_isfile.assert_called_once()
-
- @mock.patch.object(os.path, 'isfile', return_value=True)
- @mock.patch.object(subprocess, 'check_output', return_value=None)
- def testSuccessfullyStagedPatchMetadataFileForCommit(self, mock_run_cmd, _):
-
- patch_metadata_path = '/abs/path/to/filesdir/PATCHES.json'
-
- update_chromeos_llvm_hash.StagePatchMetadataFileForCommit(
- patch_metadata_path)
-
- mock_run_cmd.assert_called_once()
-
- def testNoPatchResultsForCommit(self):
- package_1_patch_info_dict = {
- 'applied_patches': ['display_results.patch'],
- 'failed_patches': ['fixes_output.patch'],
- 'non_applicable_patches': [],
- 'disabled_patches': [],
- 'removed_patches': [],
- 'modified_metadata': None
- }
-
- package_2_patch_info_dict = {
- 'applied_patches': ['redirects_stdout.patch', 'fix_display.patch'],
- 'failed_patches': [],
- 'non_applicable_patches': [],
- 'disabled_patches': [],
- 'removed_patches': [],
- 'modified_metadata': None
- }
-
- test_package_info_dict = {
- 'test-packages/package1': package_1_patch_info_dict,
- 'test-packages/package2': package_2_patch_info_dict
- }
-
- test_commit_message = ['Updated packages']
-
- self.assertListEqual(
- update_chromeos_llvm_hash.StagePackagesPatchResultsForCommit(
- test_package_info_dict, test_commit_message), test_commit_message)
-
- @mock.patch.object(update_chromeos_llvm_hash,
- 'StagePatchMetadataFileForCommit')
- @mock.patch.object(update_chromeos_llvm_hash, 'RemovePatchesFromFilesDir')
- def testAddedPatchResultsForCommit(self, mock_remove_patches,
- mock_stage_patches_for_commit):
-
- package_1_patch_info_dict = {
- 'applied_patches': [],
- 'failed_patches': [],
- 'non_applicable_patches': [],
- 'disabled_patches': ['fixes_output.patch'],
- 'removed_patches': [],
- 'modified_metadata': '/abs/path/to/filesdir/PATCHES.json'
- }
-
- package_2_patch_info_dict = {
- 'applied_patches': ['fix_display.patch'],
- 'failed_patches': [],
- 'non_applicable_patches': [],
- 'disabled_patches': [],
- 'removed_patches': ['/abs/path/to/filesdir/redirect_stdout.patch'],
- 'modified_metadata': '/abs/path/to/filesdir/PATCHES.json'
- }
-
- test_package_info_dict = {
- 'test-packages/package1': package_1_patch_info_dict,
- 'test-packages/package2': package_2_patch_info_dict
- }
-
- test_commit_message = ['Updated packages']
-
- expected_commit_messages = [
- 'Updated packages', '\nFor the package test-packages/package1:',
- 'The patch metadata file PATCHES.json was modified',
- 'The following patches were disabled:', 'fixes_output.patch',
- '\nFor the package test-packages/package2:',
- 'The patch metadata file PATCHES.json was modified',
- 'The following patches were removed:', 'redirect_stdout.patch'
- ]
-
- self.assertListEqual(
- update_chromeos_llvm_hash.StagePackagesPatchResultsForCommit(
- test_package_info_dict, test_commit_message),
- expected_commit_messages)
-
- path_to_removed_patch = '/abs/path/to/filesdir/redirect_stdout.patch'
-
- mock_remove_patches.assert_called_once_with([path_to_removed_patch])
-
- self.assertEqual(mock_stage_patches_for_commit.call_count, 2)
-
- @mock.patch.object(get_llvm_hash, 'GetLLVMMajorVersion')
- @mock.patch.object(update_chromeos_llvm_hash,
- 'CreatePathDictionaryFromPackages')
- @mock.patch.object(git, 'CreateBranch')
- @mock.patch.object(update_chromeos_llvm_hash, 'UpdateEbuildLLVMHash')
- @mock.patch.object(update_chromeos_llvm_hash, 'UprevEbuildSymlink')
- @mock.patch.object(git, 'UploadChanges')
- @mock.patch.object(git, 'DeleteBranch')
- @mock.patch.object(os.path, 'realpath')
- def testExceptionRaisedWhenUpdatingPackages(
- self, mock_realpath, mock_delete_repo, mock_upload_changes,
- mock_uprev_symlink, mock_update_llvm_next, mock_create_repo,
- mock_create_path_dict, mock_llvm_major_version):
-
- path_to_package_dir = '/some/path/to/chroot/src/path/to'
- abs_path_to_package = os.path.join(path_to_package_dir, 'package.ebuild')
- symlink_path_to_package = os.path.join(path_to_package_dir,
- 'package-r1.ebuild')
-
- mock_llvm_major_version.return_value = '1234'
-
- # Test function to simulate 'CreateBranch' when successfully created the
- # branch on a valid repo path.
- def SuccessfullyCreateBranchForChanges(_, branch):
- self.assertEqual(branch, 'update-LLVM_NEXT_HASH-a123testhash4')
-
- # Test function to simulate 'UpdateEbuildLLVMHash' when successfully
- # updated the ebuild's 'LLVM_NEXT_HASH'.
- def SuccessfullyUpdatedLLVMHash(ebuild_path, _, git_hash, svn_version):
- self.assertEqual(ebuild_path, abs_path_to_package)
- self.assertEqual(git_hash, 'a123testhash4')
- self.assertEqual(svn_version, 1000)
-
- # Test function to simulate 'UprevEbuildSymlink' when the symlink to the
- # ebuild does not have a revision number.
- def FailedToUprevEbuildSymlink(_):
- # Raises a 'ValueError' exception because the symlink did not have have a
- # revision number.
- raise ValueError('Failed to uprev the ebuild.')
-
- # Test function to fail on 'UploadChanges' if the function gets called
- # when an exception is raised.
- def ShouldNotExecuteUploadChanges(*args):
- # Test function should not be called (i.e. execution should resume in the
- # 'finally' block) because 'UprevEbuildSymlink' raised an
- # exception.
- assert len(args) == 3
- assert False, ('Failed to go to "finally" block '
- 'after the exception was raised.')
-
- test_package_path_dict = {symlink_path_to_package: abs_path_to_package}
-
- # Simulate behavior of 'CreatePathDictionaryFromPackages()' when
- # successfully created a dictionary where the key is the absolute path to
- # the symlink of the package and value is the absolute path to the ebuild of
- # the package.
- mock_create_path_dict.return_value = test_package_path_dict
-
- # Use test function to simulate behavior.
- mock_create_repo.side_effect = SuccessfullyCreateBranchForChanges
- mock_update_llvm_next.side_effect = SuccessfullyUpdatedLLVMHash
- mock_uprev_symlink.side_effect = FailedToUprevEbuildSymlink
- mock_upload_changes.side_effect = ShouldNotExecuteUploadChanges
- mock_realpath.return_value = '/abs/path/to/test-packages/package1.ebuild'
-
- packages_to_update = ['test-packages/package1']
- llvm_variant = update_chromeos_llvm_hash.LLVMVariant.next
- git_hash = 'a123testhash4'
- svn_version = 1000
- chroot_path = '/some/path/to/chroot'
- patch_metadata_file = 'PATCHES.json'
- git_hash_source = 'google3'
- branch = 'update-LLVM_NEXT_HASH-a123testhash4'
- extra_commit_msg = None
-
- # Verify exception is raised when an exception is thrown within
- # the 'try' block by UprevEbuildSymlink function.
- with self.assertRaises(ValueError) as err:
- update_chromeos_llvm_hash.UpdatePackages(packages_to_update, llvm_variant,
- git_hash, svn_version,
- chroot_path, patch_metadata_file,
- failure_modes.FailureModes.FAIL,
- git_hash_source,
- extra_commit_msg)
-
- self.assertEqual(str(err.exception), 'Failed to uprev the ebuild.')
-
- mock_create_path_dict.assert_called_once_with(chroot_path,
- packages_to_update)
-
- mock_create_repo.assert_called_once_with(path_to_package_dir, branch)
-
- mock_update_llvm_next.assert_called_once_with(abs_path_to_package,
- llvm_variant, git_hash,
- svn_version)
-
- mock_uprev_symlink.assert_called_once_with(symlink_path_to_package)
-
- mock_upload_changes.assert_not_called()
-
- mock_delete_repo.assert_called_once_with(path_to_package_dir, branch)
-
- @mock.patch.object(update_chromeos_llvm_hash, 'EnsurePackageMaskContains')
- @mock.patch.object(get_llvm_hash, 'GetLLVMMajorVersion')
- @mock.patch.object(update_chromeos_llvm_hash,
- 'CreatePathDictionaryFromPackages')
- @mock.patch.object(git, 'CreateBranch')
- @mock.patch.object(update_chromeos_llvm_hash, 'UpdateEbuildLLVMHash')
- @mock.patch.object(update_chromeos_llvm_hash, 'UprevEbuildSymlink')
- @mock.patch.object(git, 'UploadChanges')
- @mock.patch.object(git, 'DeleteBranch')
- @mock.patch.object(llvm_patch_management, 'UpdatePackagesPatchMetadataFile')
- @mock.patch.object(update_chromeos_llvm_hash,
- 'StagePatchMetadataFileForCommit')
- def testSuccessfullyUpdatedPackages(self, mock_stage_patch_file,
- mock_update_package_metadata_file,
- mock_delete_repo, mock_upload_changes,
- mock_uprev_symlink, mock_update_llvm_next,
- mock_create_repo, mock_create_path_dict,
- mock_llvm_version, mock_mask_contains):
-
- path_to_package_dir = '/some/path/to/chroot/src/path/to'
- abs_path_to_package = os.path.join(path_to_package_dir, 'package.ebuild')
- symlink_path_to_package = os.path.join(path_to_package_dir,
- 'package-r1.ebuild')
-
- # Test function to simulate 'CreateBranch' when successfully created the
- # branch for the changes to be made to the ebuild files.
- def SuccessfullyCreateBranchForChanges(_, branch):
- self.assertEqual(branch, 'update-LLVM_NEXT_HASH-a123testhash5')
-
- # Test function to simulate 'UploadChanges' after a successfull update of
- # 'LLVM_NEXT_HASH" of the ebuild file.
- def SuccessfullyUpdatedLLVMHash(ebuild_path, _, git_hash, svn_version):
- self.assertEqual(ebuild_path,
- '/some/path/to/chroot/src/path/to/package.ebuild')
- self.assertEqual(git_hash, 'a123testhash5')
- self.assertEqual(svn_version, 1000)
-
- # Test function to simulate 'UprevEbuildSymlink' when successfully
- # incremented the revision number by 1.
- def SuccessfullyUprevedEbuildSymlink(symlink_path):
- self.assertEqual(symlink_path,
- '/some/path/to/chroot/src/path/to/package-r1.ebuild')
-
- # Test function to simulate 'UpdatePackagesPatchMetadataFile()' when the
- # patch results contains a disabled patch in 'disable_patches' mode.
- def RetrievedPatchResults(chroot_path, svn_version, patch_metadata_file,
- packages, mode):
-
- self.assertEqual(chroot_path, '/some/path/to/chroot')
- self.assertEqual(svn_version, 1000)
- self.assertEqual(patch_metadata_file, 'PATCHES.json')
- self.assertListEqual(packages, ['path/to'])
- self.assertEqual(mode, failure_modes.FailureModes.DISABLE_PATCHES)
-
- PatchInfo = collections.namedtuple('PatchInfo', [
- 'applied_patches', 'failed_patches', 'non_applicable_patches',
- 'disabled_patches', 'removed_patches', 'modified_metadata'
- ])
-
- package_patch_info = PatchInfo(
- applied_patches=['fix_display.patch'],
- failed_patches=['fix_stdout.patch'],
- non_applicable_patches=[],
- disabled_patches=['fix_stdout.patch'],
- removed_patches=[],
- modified_metadata='/abs/path/to/filesdir/%s' % patch_metadata_file)
-
- package_info_dict = {'path/to': package_patch_info._asdict()}
-
- # Returns a dictionary where the key is the package and the value is a
- # dictionary that contains information about the package's patch results
- # produced by the patch manager.
- return package_info_dict
-
- # Test function to simulate 'UploadChanges()' when successfully created a
- # commit for the changes made to the packages and their patches and
- # retrieved the change list of the commit.
- def SuccessfullyUploadedChanges(*args):
- assert len(args) == 3
- commit_url = 'https://some_name/path/to/commit/+/12345'
- return git.CommitContents(url=commit_url, cl_number=12345)
-
- test_package_path_dict = {symlink_path_to_package: abs_path_to_package}
-
- # Simulate behavior of 'CreatePathDictionaryFromPackages()' when
- # successfully created a dictionary where the key is the absolute path to
- # the symlink of the package and value is the absolute path to the ebuild of
- # the package.
- mock_create_path_dict.return_value = test_package_path_dict
-
- # Use test function to simulate behavior.
- mock_create_repo.side_effect = SuccessfullyCreateBranchForChanges
- mock_update_llvm_next.side_effect = SuccessfullyUpdatedLLVMHash
- mock_uprev_symlink.side_effect = SuccessfullyUprevedEbuildSymlink
- mock_update_package_metadata_file.side_effect = RetrievedPatchResults
- mock_upload_changes.side_effect = SuccessfullyUploadedChanges
- mock_llvm_version.return_value = '1234'
- mock_mask_contains.reurn_value = None
-
- packages_to_update = ['test-packages/package1']
- llvm_variant = update_chromeos_llvm_hash.LLVMVariant.next
- git_hash = 'a123testhash5'
- svn_version = 1000
- chroot_path = '/some/path/to/chroot'
- patch_metadata_file = 'PATCHES.json'
- git_hash_source = 'tot'
- branch = 'update-LLVM_NEXT_HASH-a123testhash5'
- extra_commit_msg = '\ncommit-message-end'
-
- change_list = update_chromeos_llvm_hash.UpdatePackages(
- packages_to_update, llvm_variant, git_hash, svn_version, chroot_path,
- patch_metadata_file, failure_modes.FailureModes.DISABLE_PATCHES,
- git_hash_source, extra_commit_msg)
-
- self.assertEqual(change_list.url,
- 'https://some_name/path/to/commit/+/12345')
-
- self.assertEqual(change_list.cl_number, 12345)
-
- mock_create_path_dict.assert_called_once_with(chroot_path,
- packages_to_update)
-
- mock_create_repo.assert_called_once_with(path_to_package_dir, branch)
-
- mock_update_llvm_next.assert_called_once_with(abs_path_to_package,
- llvm_variant, git_hash,
- svn_version)
-
- mock_uprev_symlink.assert_called_once_with(symlink_path_to_package)
-
- mock_mask_contains.assert_called_once_with(chroot_path, git_hash)
-
- expected_commit_messages = [
- 'llvm-next/tot: upgrade to a123testhash5 (r1000)\n',
- 'The following packages have been updated:', 'path/to',
- '\nFor the package path/to:',
- 'The patch metadata file PATCHES.json was modified',
- 'The following patches were disabled:', 'fix_stdout.patch',
- '\ncommit-message-end'
- ]
-
- mock_update_package_metadata_file.assert_called_once()
-
- mock_stage_patch_file.assert_called_once_with(
- '/abs/path/to/filesdir/PATCHES.json')
-
- mock_upload_changes.assert_called_once_with(path_to_package_dir, branch,
- expected_commit_messages)
-
- mock_delete_repo.assert_called_once_with(path_to_package_dir, branch)
-
- @mock.patch.object(subprocess, 'check_output', return_value=None)
- @mock.patch.object(get_llvm_hash, 'GetLLVMMajorVersion')
- def testEnsurePackageMaskContainsExisting(self, mock_llvm_version,
- mock_git_add):
- chroot_path = 'absolute/path/to/chroot'
- git_hash = 'badf00d'
- mock_llvm_version.return_value = '1234'
- with mock.patch(
- 'update_chromeos_llvm_hash.open',
- mock.mock_open(read_data='\n=sys-devel/llvm-1234.0_pre*\n'),
- create=True) as mock_file:
- update_chromeos_llvm_hash.EnsurePackageMaskContains(chroot_path, git_hash)
- handle = mock_file()
- handle.write.assert_not_called()
- mock_llvm_version.assert_called_once_with(git_hash)
-
- overlay_dir = 'absolute/path/to/chroot/src/third_party/chromiumos-overlay'
- mask_path = overlay_dir + '/profiles/targets/chromeos/package.mask'
- mock_git_add.assert_called_once_with(
- ['git', '-C', overlay_dir, 'add', mask_path])
-
- @mock.patch.object(subprocess, 'check_output', return_value=None)
- @mock.patch.object(get_llvm_hash, 'GetLLVMMajorVersion')
- def testEnsurePackageMaskContainsNotExisting(self, mock_llvm_version,
- mock_git_add):
- chroot_path = 'absolute/path/to/chroot'
- git_hash = 'badf00d'
- mock_llvm_version.return_value = '1234'
- with mock.patch(
- 'update_chromeos_llvm_hash.open',
- mock.mock_open(read_data='nothing relevant'),
- create=True) as mock_file:
- update_chromeos_llvm_hash.EnsurePackageMaskContains(chroot_path, git_hash)
- handle = mock_file()
- handle.write.assert_called_once_with('=sys-devel/llvm-1234.0_pre*\n')
- mock_llvm_version.assert_called_once_with(git_hash)
-
- overlay_dir = 'absolute/path/to/chroot/src/third_party/chromiumos-overlay'
- mask_path = overlay_dir + '/profiles/targets/chromeos/package.mask'
- mock_git_add.assert_called_once_with(
- ['git', '-C', overlay_dir, 'add', mask_path])
-
-
-if __name__ == '__main__':
- unittest.main()
+ """Test class for updating LLVM hashes of packages."""
+
+ @mock.patch.object(os.path, "realpath")
+ def testDefaultCrosRootFromCrOSCheckout(self, mock_llvm_tools):
+ llvm_tools_path = (
+ "/path/to/cros/src/third_party/toolchain-utils/llvm_tools"
+ )
+ mock_llvm_tools.return_value = llvm_tools_path
+ self.assertEqual(
+ update_chromeos_llvm_hash.defaultCrosRoot(), Path("/path/to/cros")
+ )
+
+ @mock.patch.object(os.path, "realpath")
+ def testDefaultCrosRootFromOutsideCrOSCheckout(self, mock_llvm_tools):
+ mock_llvm_tools.return_value = "~/toolchain-utils/llvm_tools"
+ self.assertEqual(
+ update_chromeos_llvm_hash.defaultCrosRoot(),
+ Path.home() / "chromiumos",
+ )
+
+ # Simulate behavior of 'os.path.isfile()' when the ebuild path to a package
+ # does not exist.
+ @mock.patch.object(os.path, "isfile", return_value=False)
+ def testFailedToUpdateLLVMHashForInvalidEbuildPath(self, mock_isfile):
+ ebuild_path = "/some/path/to/package.ebuild"
+ llvm_variant = update_chromeos_llvm_hash.LLVMVariant.current
+ git_hash = "a123testhash1"
+ svn_version = 1000
+
+ # Verify the exception is raised when the ebuild path does not exist.
+ with self.assertRaises(ValueError) as err:
+ update_chromeos_llvm_hash.UpdateEbuildLLVMHash(
+ ebuild_path, llvm_variant, git_hash, svn_version
+ )
+
+ self.assertEqual(
+ str(err.exception), "Invalid ebuild path provided: %s" % ebuild_path
+ )
+
+ mock_isfile.assert_called_once()
+
+ # Simulate 'os.path.isfile' behavior on a valid ebuild path.
+ @mock.patch.object(os.path, "isfile", return_value=True)
+ def testFailedToUpdateLLVMHash(self, mock_isfile):
+ # Create a temporary file to simulate an ebuild file of a package.
+ with test_helpers.CreateTemporaryJsonFile() as ebuild_file:
+ with open(ebuild_file, "w") as f:
+ f.write(
+ "\n".join(
+ [
+ "First line in the ebuild",
+ "Second line in the ebuild",
+ "Last line in the ebuild",
+ ]
+ )
+ )
+
+ llvm_variant = update_chromeos_llvm_hash.LLVMVariant.current
+ git_hash = "a123testhash1"
+ svn_version = 1000
+
+ # Verify the exception is raised when the ebuild file does not have
+ # 'LLVM_HASH'.
+ with self.assertRaises(ValueError) as err:
+ update_chromeos_llvm_hash.UpdateEbuildLLVMHash(
+ ebuild_file, llvm_variant, git_hash, svn_version
+ )
+
+ self.assertEqual(str(err.exception), "Failed to update LLVM_HASH")
+
+ llvm_variant = update_chromeos_llvm_hash.LLVMVariant.next
+
+ self.assertEqual(mock_isfile.call_count, 2)
+
+ # Simulate 'os.path.isfile' behavior on a valid ebuild path.
+ @mock.patch.object(os.path, "isfile", return_value=True)
+ def testFailedToUpdateLLVMNextHash(self, mock_isfile):
+ # Create a temporary file to simulate an ebuild file of a package.
+ with test_helpers.CreateTemporaryJsonFile() as ebuild_file:
+ with open(ebuild_file, "w") as f:
+ f.write(
+ "\n".join(
+ [
+ "First line in the ebuild",
+ "Second line in the ebuild",
+ "Last line in the ebuild",
+ ]
+ )
+ )
+
+ llvm_variant = update_chromeos_llvm_hash.LLVMVariant.next
+ git_hash = "a123testhash1"
+ svn_version = 1000
+
+ # Verify the exception is raised when the ebuild file does not have
+ # 'LLVM_NEXT_HASH'.
+ with self.assertRaises(ValueError) as err:
+ update_chromeos_llvm_hash.UpdateEbuildLLVMHash(
+ ebuild_file, llvm_variant, git_hash, svn_version
+ )
+
+ self.assertEqual(
+ str(err.exception), "Failed to update LLVM_NEXT_HASH"
+ )
+
+ self.assertEqual(mock_isfile.call_count, 2)
+
+ @mock.patch.object(os.path, "isfile", return_value=True)
+ @mock.patch.object(subprocess, "check_output", return_value=None)
+ def testSuccessfullyStageTheEbuildForCommitForLLVMHashUpdate(
+ self, mock_stage_commit_command, mock_isfile
+ ):
+
+ # Create a temporary file to simulate an ebuild file of a package.
+ with test_helpers.CreateTemporaryJsonFile() as ebuild_file:
+ # Updates LLVM_HASH to 'git_hash' and revision to
+ # 'svn_version'.
+ llvm_variant = update_chromeos_llvm_hash.LLVMVariant.current
+ git_hash = "a123testhash1"
+ svn_version = 1000
+
+ with open(ebuild_file, "w") as f:
+ f.write(
+ "\n".join(
+ [
+ "First line in the ebuild",
+ "Second line in the ebuild",
+ 'LLVM_HASH="a12b34c56d78e90" # r500',
+ "Last line in the ebuild",
+ ]
+ )
+ )
+
+ update_chromeos_llvm_hash.UpdateEbuildLLVMHash(
+ ebuild_file, llvm_variant, git_hash, svn_version
+ )
+
+ expected_file_contents = [
+ "First line in the ebuild\n",
+ "Second line in the ebuild\n",
+ 'LLVM_HASH="a123testhash1" # r1000\n',
+ "Last line in the ebuild",
+ ]
+
+ # Verify the new file contents of the ebuild file match the expected file
+ # contents.
+ with open(ebuild_file) as new_file:
+ file_contents_as_a_list = [cur_line for cur_line in new_file]
+ self.assertListEqual(
+ file_contents_as_a_list, expected_file_contents
+ )
+
+ self.assertEqual(mock_isfile.call_count, 2)
+
+ mock_stage_commit_command.assert_called_once()
+
+ @mock.patch.object(os.path, "isfile", return_value=True)
+ @mock.patch.object(subprocess, "check_output", return_value=None)
+ def testSuccessfullyStageTheEbuildForCommitForLLVMNextHashUpdate(
+ self, mock_stage_commit_command, mock_isfile
+ ):
+
+ # Create a temporary file to simulate an ebuild file of a package.
+ with test_helpers.CreateTemporaryJsonFile() as ebuild_file:
+ # Updates LLVM_NEXT_HASH to 'git_hash' and revision to
+ # 'svn_version'.
+ llvm_variant = update_chromeos_llvm_hash.LLVMVariant.next
+ git_hash = "a123testhash1"
+ svn_version = 1000
+
+ with open(ebuild_file, "w") as f:
+ f.write(
+ "\n".join(
+ [
+ "First line in the ebuild",
+ "Second line in the ebuild",
+ 'LLVM_NEXT_HASH="a12b34c56d78e90" # r500',
+ "Last line in the ebuild",
+ ]
+ )
+ )
+
+ update_chromeos_llvm_hash.UpdateEbuildLLVMHash(
+ ebuild_file, llvm_variant, git_hash, svn_version
+ )
+
+ expected_file_contents = [
+ "First line in the ebuild\n",
+ "Second line in the ebuild\n",
+ 'LLVM_NEXT_HASH="a123testhash1" # r1000\n',
+ "Last line in the ebuild",
+ ]
+
+ # Verify the new file contents of the ebuild file match the expected file
+ # contents.
+ with open(ebuild_file) as new_file:
+ file_contents_as_a_list = [cur_line for cur_line in new_file]
+ self.assertListEqual(
+ file_contents_as_a_list, expected_file_contents
+ )
+
+ self.assertEqual(mock_isfile.call_count, 2)
+
+ mock_stage_commit_command.assert_called_once()
+
+ @mock.patch.object(get_llvm_hash, "GetLLVMMajorVersion")
+ @mock.patch.object(os.path, "islink", return_value=False)
+ def testFailedToUprevEbuildToVersionForInvalidSymlink(
+ self, mock_islink, mock_llvm_version
+ ):
+ symlink_path = "/path/to/chroot/package/package.ebuild"
+ svn_version = 1000
+ git_hash = "badf00d"
+ mock_llvm_version.return_value = "1234"
+
+        # Verify the exception is raised when an invalid symbolic link is passed in.
+ with self.assertRaises(ValueError) as err:
+ update_chromeos_llvm_hash.UprevEbuildToVersion(
+ symlink_path, svn_version, git_hash
+ )
+
+ self.assertEqual(
+ str(err.exception), "Invalid symlink provided: %s" % symlink_path
+ )
+
+ mock_islink.assert_called_once()
+ mock_llvm_version.assert_not_called()
+
+ @mock.patch.object(os.path, "islink", return_value=False)
+ def testFailedToUprevEbuildSymlinkForInvalidSymlink(self, mock_islink):
+ symlink_path = "/path/to/chroot/package/package.ebuild"
+
+        # Verify the exception is raised when an invalid symbolic link is passed in.
+ with self.assertRaises(ValueError) as err:
+ update_chromeos_llvm_hash.UprevEbuildSymlink(symlink_path)
+
+ self.assertEqual(
+ str(err.exception), "Invalid symlink provided: %s" % symlink_path
+ )
+
+ mock_islink.assert_called_once()
+
+ @mock.patch.object(get_llvm_hash, "GetLLVMMajorVersion")
+ # Simulate 'os.path.islink' when a symbolic link is passed in.
+ @mock.patch.object(os.path, "islink", return_value=True)
+ # Simulate 'os.path.realpath' when a symbolic link is passed in.
+ @mock.patch.object(os.path, "realpath", return_value=True)
+ def testFailedToUprevEbuildToVersion(
+ self, mock_realpath, mock_islink, mock_llvm_version
+ ):
+ symlink_path = "/path/to/chroot/llvm/llvm_pre123_p.ebuild"
+ mock_realpath.return_value = "/abs/path/to/llvm/llvm_pre123_p.ebuild"
+ git_hash = "badf00d"
+ mock_llvm_version.return_value = "1234"
+ svn_version = 1000
+
+ # Verify the exception is raised when the symlink does not match the
+        # expected pattern.
+ with self.assertRaises(ValueError) as err:
+ update_chromeos_llvm_hash.UprevEbuildToVersion(
+ symlink_path, svn_version, git_hash
+ )
+
+ self.assertEqual(str(err.exception), "Failed to uprev the ebuild.")
+
+ mock_llvm_version.assert_called_once_with(git_hash)
+ mock_islink.assert_called_once_with(symlink_path)
+
+ # Simulate 'os.path.islink' when a symbolic link is passed in.
+ @mock.patch.object(os.path, "islink", return_value=True)
+ def testFailedToUprevEbuildSymlink(self, mock_islink):
+ symlink_path = "/path/to/chroot/llvm/llvm_pre123_p.ebuild"
+
+ # Verify the exception is raised when the symlink does not match the
+        # expected pattern.
+ with self.assertRaises(ValueError) as err:
+ update_chromeos_llvm_hash.UprevEbuildSymlink(symlink_path)
+
+ self.assertEqual(str(err.exception), "Failed to uprev the symlink.")
+
+ mock_islink.assert_called_once_with(symlink_path)
+
+ @mock.patch.object(get_llvm_hash, "GetLLVMMajorVersion")
+ @mock.patch.object(os.path, "islink", return_value=True)
+ @mock.patch.object(os.path, "realpath")
+ @mock.patch.object(subprocess, "check_output", return_value=None)
+ def testSuccessfullyUprevEbuildToVersionLLVM(
+ self, mock_command_output, mock_realpath, mock_islink, mock_llvm_version
+ ):
+ symlink = "/path/to/llvm/llvm-12.0_pre3_p2-r10.ebuild"
+ ebuild = "/abs/path/to/llvm/llvm-12.0_pre3_p2.ebuild"
+ mock_realpath.return_value = ebuild
+ git_hash = "badf00d"
+ mock_llvm_version.return_value = "1234"
+ svn_version = 1000
+
+ update_chromeos_llvm_hash.UprevEbuildToVersion(
+ symlink, svn_version, git_hash
+ )
+
+ mock_llvm_version.assert_called_once_with(git_hash)
+
+ mock_islink.assert_called()
+
+ mock_realpath.assert_called_once_with(symlink)
+
+ mock_command_output.assert_called()
+
+ # Verify commands
+ symlink_dir = os.path.dirname(symlink)
+ timestamp = datetime.datetime.today().strftime("%Y%m%d")
+ new_ebuild = (
+ "/abs/path/to/llvm/llvm-1234.0_pre1000_p%s.ebuild" % timestamp
+ )
+ new_symlink = new_ebuild[: -len(".ebuild")] + "-r1.ebuild"
+
+ expected_cmd = ["git", "-C", symlink_dir, "mv", ebuild, new_ebuild]
+ self.assertEqual(
+ mock_command_output.call_args_list[0], mock.call(expected_cmd)
+ )
+
+ expected_cmd = ["ln", "-s", "-r", new_ebuild, new_symlink]
+ self.assertEqual(
+ mock_command_output.call_args_list[1], mock.call(expected_cmd)
+ )
+
+ expected_cmd = ["git", "-C", symlink_dir, "add", new_symlink]
+ self.assertEqual(
+ mock_command_output.call_args_list[2], mock.call(expected_cmd)
+ )
+
+ expected_cmd = ["git", "-C", symlink_dir, "rm", symlink]
+ self.assertEqual(
+ mock_command_output.call_args_list[3], mock.call(expected_cmd)
+ )
+
+ @mock.patch.object(
+ chroot,
+ "GetChrootEbuildPaths",
+ return_value=["/chroot/path/test.ebuild"],
+ )
+ @mock.patch.object(subprocess, "check_output", return_value="")
+ def testManifestUpdate(self, mock_subprocess, mock_ebuild_paths):
+ manifest_packages = ["sys-devel/llvm"]
+ chroot_path = "/path/to/chroot"
+ update_chromeos_llvm_hash.UpdateManifests(
+ manifest_packages, chroot_path
+ )
+
+ args = mock_subprocess.call_args[0][-1]
+ manifest_cmd = [
+ "cros_sdk",
+ "--",
+ "ebuild",
+ "/chroot/path/test.ebuild",
+ "manifest",
+ ]
+ self.assertEqual(args, manifest_cmd)
+ mock_ebuild_paths.assert_called_once()
+
+ @mock.patch.object(get_llvm_hash, "GetLLVMMajorVersion")
+ @mock.patch.object(os.path, "islink", return_value=True)
+ @mock.patch.object(os.path, "realpath")
+ @mock.patch.object(subprocess, "check_output", return_value=None)
+ def testSuccessfullyUprevEbuildToVersionNonLLVM(
+ self, mock_command_output, mock_realpath, mock_islink, mock_llvm_version
+ ):
+ symlink = (
+ "/abs/path/to/compiler-rt/compiler-rt-12.0_pre314159265-r4.ebuild"
+ )
+ ebuild = "/abs/path/to/compiler-rt/compiler-rt-12.0_pre314159265.ebuild"
+ mock_realpath.return_value = ebuild
+ mock_llvm_version.return_value = "1234"
+ svn_version = 1000
+ git_hash = "5678"
+
+ update_chromeos_llvm_hash.UprevEbuildToVersion(
+ symlink, svn_version, git_hash
+ )
+
+ mock_islink.assert_called()
+
+ mock_realpath.assert_called_once_with(symlink)
+
+ mock_llvm_version.assert_called_once_with(git_hash)
+
+ mock_command_output.assert_called()
+
+ # Verify commands
+ symlink_dir = os.path.dirname(symlink)
+ new_ebuild = (
+ "/abs/path/to/compiler-rt/compiler-rt-1234.0_pre1000.ebuild"
+ )
+ new_symlink = new_ebuild[: -len(".ebuild")] + "-r1.ebuild"
+
+ expected_cmd = ["git", "-C", symlink_dir, "mv", ebuild, new_ebuild]
+ self.assertEqual(
+ mock_command_output.call_args_list[0], mock.call(expected_cmd)
+ )
+
+ expected_cmd = ["ln", "-s", "-r", new_ebuild, new_symlink]
+ self.assertEqual(
+ mock_command_output.call_args_list[1], mock.call(expected_cmd)
+ )
+
+ expected_cmd = ["git", "-C", symlink_dir, "add", new_symlink]
+ self.assertEqual(
+ mock_command_output.call_args_list[2], mock.call(expected_cmd)
+ )
+
+ expected_cmd = ["git", "-C", symlink_dir, "rm", symlink]
+ self.assertEqual(
+ mock_command_output.call_args_list[3], mock.call(expected_cmd)
+ )
+
+ @mock.patch.object(os.path, "islink", return_value=True)
+ @mock.patch.object(subprocess, "check_output", return_value=None)
+ def testSuccessfullyUprevEbuildSymlink(
+ self, mock_command_output, mock_islink
+ ):
+ symlink_to_uprev = "/symlink/to/package-r1.ebuild"
+
+ update_chromeos_llvm_hash.UprevEbuildSymlink(symlink_to_uprev)
+
+ mock_islink.assert_called_once_with(symlink_to_uprev)
+
+ mock_command_output.assert_called_once()
+
+    # Simulate behavior of 'os.path.isdir()' when the path to the repo is not a
+    # directory.
+
+ @mock.patch.object(chroot, "GetChrootEbuildPaths")
+ @mock.patch.object(chroot, "ConvertChrootPathsToAbsolutePaths")
+ def testExceptionRaisedWhenCreatingPathDictionaryFromPackages(
+ self, mock_chroot_paths_to_symlinks, mock_get_chroot_paths
+ ):
+
+ chroot_path = "/some/path/to/chroot"
+
+ package_name = "test-pckg/package"
+ package_chroot_path = "/some/chroot/path/to/package-r1.ebuild"
+
+ # Test function to simulate 'ConvertChrootPathsToAbsolutePaths' when a
+ # symlink does not start with the prefix '/mnt/host/source'.
+ def BadPrefixChrootPath(*args):
+ assert len(args) == 2
+ raise ValueError(
+ "Invalid prefix for the chroot path: "
+ "%s" % package_chroot_path
+ )
+
+ # Simulate 'GetChrootEbuildPaths' when valid packages are passed in.
+ #
+ # Returns a list of chroot paths.
+ mock_get_chroot_paths.return_value = [package_chroot_path]
+
+ # Use test function to simulate 'ConvertChrootPathsToAbsolutePaths'
+ # behavior.
+ mock_chroot_paths_to_symlinks.side_effect = BadPrefixChrootPath
+
+        # Verify an exception is raised for an invalid prefix in the symlink.
+ with self.assertRaises(ValueError) as err:
+ update_chromeos_llvm_hash.CreatePathDictionaryFromPackages(
+ chroot_path, [package_name]
+ )
+
+ self.assertEqual(
+ str(err.exception),
+ "Invalid prefix for the chroot path: " "%s" % package_chroot_path,
+ )
+
+ mock_get_chroot_paths.assert_called_once_with(
+ chroot_path, [package_name]
+ )
+
+ mock_chroot_paths_to_symlinks.assert_called_once_with(
+ chroot_path, [package_chroot_path]
+ )
+
+ @mock.patch.object(chroot, "GetChrootEbuildPaths")
+ @mock.patch.object(chroot, "ConvertChrootPathsToAbsolutePaths")
+ @mock.patch.object(
+ update_chromeos_llvm_hash, "GetEbuildPathsFromSymLinkPaths"
+ )
+ def testSuccessfullyCreatedPathDictionaryFromPackages(
+ self,
+ mock_ebuild_paths_from_symlink_paths,
+ mock_chroot_paths_to_symlinks,
+ mock_get_chroot_paths,
+ ):
+
+ package_chroot_path = "/mnt/host/source/src/path/to/package-r1.ebuild"
+
+ # Simulate 'GetChrootEbuildPaths' when returning a chroot path for a valid
+ # package.
+ #
+ # Returns a list of chroot paths.
+ mock_get_chroot_paths.return_value = [package_chroot_path]
+
+ package_symlink_path = (
+ "/some/path/to/chroot/src/path/to/package-r1.ebuild"
+ )
+
+ # Simulate 'ConvertChrootPathsToAbsolutePaths' when returning a symlink to
+ # a chroot path that points to a package.
+ #
+ # Returns a list of symlink file paths.
+ mock_chroot_paths_to_symlinks.return_value = [package_symlink_path]
+
+ chroot_package_path = "/some/path/to/chroot/src/path/to/package.ebuild"
+
+ # Simulate 'GetEbuildPathsFromSymlinkPaths' when returning a dictionary of
+ # a symlink that points to an ebuild.
+ #
+ # Returns a dictionary of a symlink and ebuild file path pair
+ # where the key is the absolute path to the symlink of the ebuild file
+ # and the value is the absolute path to the ebuild file of the package.
+ mock_ebuild_paths_from_symlink_paths.return_value = {
+ package_symlink_path: chroot_package_path
+ }
+
+ chroot_path = "/some/path/to/chroot"
+ package_name = "test-pckg/package"
+
+ self.assertEqual(
+ update_chromeos_llvm_hash.CreatePathDictionaryFromPackages(
+ chroot_path, [package_name]
+ ),
+ {package_symlink_path: chroot_package_path},
+ )
+
+ mock_get_chroot_paths.assert_called_once_with(
+ chroot_path, [package_name]
+ )
+
+ mock_chroot_paths_to_symlinks.assert_called_once_with(
+ chroot_path, [package_chroot_path]
+ )
+
+ mock_ebuild_paths_from_symlink_paths.assert_called_once_with(
+ [package_symlink_path]
+ )
+
+ @mock.patch.object(subprocess, "check_output", return_value=None)
+ def testSuccessfullyRemovedPatchesFromFilesDir(self, mock_run_cmd):
+ patches_to_remove_list = [
+ "/abs/path/to/filesdir/cherry/fix_output.patch",
+ "/abs/path/to/filesdir/display_results.patch",
+ ]
+
+ update_chromeos_llvm_hash.RemovePatchesFromFilesDir(
+ patches_to_remove_list
+ )
+
+ self.assertEqual(mock_run_cmd.call_count, 2)
+
+ @mock.patch.object(os.path, "isfile", return_value=False)
+ def testInvalidPatchMetadataFileStagedForCommit(self, mock_isfile):
+ patch_metadata_path = "/abs/path/to/filesdir/PATCHES"
+
+ # Verify the exception is raised when the absolute path to the patch
+ # metadata file does not exist or is not a file.
+ with self.assertRaises(ValueError) as err:
+ update_chromeos_llvm_hash.StagePatchMetadataFileForCommit(
+ patch_metadata_path
+ )
+
+ self.assertEqual(
+ str(err.exception),
+ "Invalid patch metadata file provided: " "%s" % patch_metadata_path,
+ )
+
+ mock_isfile.assert_called_once()
+
+ @mock.patch.object(os.path, "isfile", return_value=True)
+ @mock.patch.object(subprocess, "check_output", return_value=None)
+ def testSuccessfullyStagedPatchMetadataFileForCommit(self, mock_run_cmd, _):
+
+ patch_metadata_path = "/abs/path/to/filesdir/PATCHES.json"
+
+ update_chromeos_llvm_hash.StagePatchMetadataFileForCommit(
+ patch_metadata_path
+ )
+
+ mock_run_cmd.assert_called_once()
+
+ def testNoPatchResultsForCommit(self):
+ package_1_patch_info_dict = {
+ "applied_patches": ["display_results.patch"],
+ "failed_patches": ["fixes_output.patch"],
+ "non_applicable_patches": [],
+ "disabled_patches": [],
+ "removed_patches": [],
+ "modified_metadata": None,
+ }
+
+ package_2_patch_info_dict = {
+ "applied_patches": ["redirects_stdout.patch", "fix_display.patch"],
+ "failed_patches": [],
+ "non_applicable_patches": [],
+ "disabled_patches": [],
+ "removed_patches": [],
+ "modified_metadata": None,
+ }
+
+ test_package_info_dict = {
+ "test-packages/package1": package_1_patch_info_dict,
+ "test-packages/package2": package_2_patch_info_dict,
+ }
+
+ test_commit_message = ["Updated packages"]
+
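+        # With no modified metadata in either package, the commit message is
+        # returned unchanged.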
+ self.assertListEqual(
+ update_chromeos_llvm_hash.StagePackagesPatchResultsForCommit(
+ test_package_info_dict, test_commit_message
+ ),
+ test_commit_message,
+ )
+
+ @mock.patch.object(
+ update_chromeos_llvm_hash, "StagePatchMetadataFileForCommit"
+ )
+ @mock.patch.object(update_chromeos_llvm_hash, "RemovePatchesFromFilesDir")
+ def testAddedPatchResultsForCommit(
+ self, mock_remove_patches, mock_stage_patches_for_commit
+ ):
+
+ package_1_patch_info_dict = {
+ "applied_patches": [],
+ "failed_patches": [],
+ "non_applicable_patches": [],
+ "disabled_patches": ["fixes_output.patch"],
+ "removed_patches": [],
+ "modified_metadata": "/abs/path/to/filesdir/PATCHES.json",
+ }
+
+ package_2_patch_info_dict = {
+ "applied_patches": ["fix_display.patch"],
+ "failed_patches": [],
+ "non_applicable_patches": [],
+ "disabled_patches": [],
+ "removed_patches": ["/abs/path/to/filesdir/redirect_stdout.patch"],
+ "modified_metadata": "/abs/path/to/filesdir/PATCHES.json",
+ }
+
+ test_package_info_dict = {
+ "test-packages/package1": package_1_patch_info_dict,
+ "test-packages/package2": package_2_patch_info_dict,
+ }
+
+ test_commit_message = ["Updated packages"]
+
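+        # Disabled and removed patches are summarized per package in the
+        # commit message.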
+ expected_commit_messages = [
+ "Updated packages",
+ "\nFor the package test-packages/package1:",
+ "The patch metadata file PATCHES.json was modified",
+ "The following patches were disabled:",
+ "fixes_output.patch",
+ "\nFor the package test-packages/package2:",
+ "The patch metadata file PATCHES.json was modified",
+ "The following patches were removed:",
+ "redirect_stdout.patch",
+ ]
+
+ self.assertListEqual(
+ update_chromeos_llvm_hash.StagePackagesPatchResultsForCommit(
+ test_package_info_dict, test_commit_message
+ ),
+ expected_commit_messages,
+ )
+
+ path_to_removed_patch = "/abs/path/to/filesdir/redirect_stdout.patch"
+
+ mock_remove_patches.assert_called_once_with([path_to_removed_patch])
+
+ self.assertEqual(mock_stage_patches_for_commit.call_count, 2)
+
+ @mock.patch.object(get_llvm_hash, "GetLLVMMajorVersion")
+ @mock.patch.object(
+ update_chromeos_llvm_hash, "CreatePathDictionaryFromPackages"
+ )
+ @mock.patch.object(git, "CreateBranch")
+ @mock.patch.object(update_chromeos_llvm_hash, "UpdateEbuildLLVMHash")
+ @mock.patch.object(update_chromeos_llvm_hash, "UprevEbuildSymlink")
+ @mock.patch.object(git, "UploadChanges")
+ @mock.patch.object(git, "DeleteBranch")
+ @mock.patch.object(os.path, "realpath")
+ def testExceptionRaisedWhenUpdatingPackages(
+ self,
+ mock_realpath,
+ mock_delete_repo,
+ mock_upload_changes,
+ mock_uprev_symlink,
+ mock_update_llvm_next,
+ mock_create_repo,
+ mock_create_path_dict,
+ mock_llvm_major_version,
+ ):
+
+ path_to_package_dir = "/some/path/to/chroot/src/path/to"
+ abs_path_to_package = os.path.join(
+ path_to_package_dir, "package.ebuild"
+ )
+ symlink_path_to_package = os.path.join(
+ path_to_package_dir, "package-r1.ebuild"
+ )
+
+ mock_llvm_major_version.return_value = "1234"
+
+        # Test function to simulate 'CreateBranch' when it successfully
+        # creates the branch on a valid repo path.
+ def SuccessfullyCreateBranchForChanges(_, branch):
+ self.assertEqual(branch, "update-LLVM_NEXT_HASH-a123testhash4")
+
+        # Test function to simulate 'UpdateEbuildLLVMHash' when it
+        # successfully updates the ebuild's 'LLVM_NEXT_HASH'.
+ def SuccessfullyUpdatedLLVMHash(ebuild_path, _, git_hash, svn_version):
+ self.assertEqual(ebuild_path, abs_path_to_package)
+ self.assertEqual(git_hash, "a123testhash4")
+ self.assertEqual(svn_version, 1000)
+
+ # Test function to simulate 'UprevEbuildSymlink' when the symlink to the
+ # ebuild does not have a revision number.
+ def FailedToUprevEbuildSymlink(_):
+            # Raises a 'ValueError' exception because the symlink did not have
+            # a revision number.
+ raise ValueError("Failed to uprev the ebuild.")
+
+ # Test function to fail on 'UploadChanges' if the function gets called
+ # when an exception is raised.
+ def ShouldNotExecuteUploadChanges(*args):
+ # Test function should not be called (i.e. execution should resume in the
+ # 'finally' block) because 'UprevEbuildSymlink' raised an
+ # exception.
+ assert len(args) == 3
+ assert False, (
+ 'Failed to go to "finally" block '
+ "after the exception was raised."
+ )
+
+ test_package_path_dict = {symlink_path_to_package: abs_path_to_package}
+
+        # Simulate behavior of 'CreatePathDictionaryFromPackages()' when it
+        # successfully creates a dictionary where the key is the absolute path
+        # to the package's symlink and the value is the absolute path to the
+        # package's ebuild.
+ mock_create_path_dict.return_value = test_package_path_dict
+
+ # Use test function to simulate behavior.
+ mock_create_repo.side_effect = SuccessfullyCreateBranchForChanges
+ mock_update_llvm_next.side_effect = SuccessfullyUpdatedLLVMHash
+ mock_uprev_symlink.side_effect = FailedToUprevEbuildSymlink
+ mock_upload_changes.side_effect = ShouldNotExecuteUploadChanges
+ mock_realpath.return_value = (
+ "/abs/path/to/test-packages/package1.ebuild"
+ )
+
+ packages_to_update = ["test-packages/package1"]
+ llvm_variant = update_chromeos_llvm_hash.LLVMVariant.next
+ git_hash = "a123testhash4"
+ svn_version = 1000
+ chroot_path = Path("/some/path/to/chroot")
+ git_hash_source = "google3"
+ branch = "update-LLVM_NEXT_HASH-a123testhash4"
+ extra_commit_msg = None
+
+        # Verify an exception is raised when 'UprevEbuildSymlink' throws
+        # within the 'try' block.
+ with self.assertRaises(ValueError) as err:
+ update_chromeos_llvm_hash.UpdatePackages(
+ packages=packages_to_update,
+ manifest_packages=[],
+ llvm_variant=llvm_variant,
+ git_hash=git_hash,
+ svn_version=svn_version,
+ chroot_path=chroot_path,
+ mode=failure_modes.FailureModes.FAIL,
+ git_hash_source=git_hash_source,
+ extra_commit_msg=extra_commit_msg,
+ )
+
+ self.assertEqual(str(err.exception), "Failed to uprev the ebuild.")
+
+ mock_create_path_dict.assert_called_once_with(
+ chroot_path, packages_to_update
+ )
+
+ mock_create_repo.assert_called_once_with(path_to_package_dir, branch)
+
+ mock_update_llvm_next.assert_called_once_with(
+ abs_path_to_package, llvm_variant, git_hash, svn_version
+ )
+
+ mock_uprev_symlink.assert_called_once_with(symlink_path_to_package)
+
+ mock_upload_changes.assert_not_called()
+
+ mock_delete_repo.assert_called_once_with(path_to_package_dir, branch)
+
+ @mock.patch.object(update_chromeos_llvm_hash, "EnsurePackageMaskContains")
+ @mock.patch.object(get_llvm_hash, "GetLLVMMajorVersion")
+ @mock.patch.object(
+ update_chromeos_llvm_hash, "CreatePathDictionaryFromPackages"
+ )
+ @mock.patch.object(git, "CreateBranch")
+ @mock.patch.object(update_chromeos_llvm_hash, "UpdateEbuildLLVMHash")
+ @mock.patch.object(update_chromeos_llvm_hash, "UprevEbuildSymlink")
+ @mock.patch.object(git, "UploadChanges")
+ @mock.patch.object(git, "DeleteBranch")
+ @mock.patch.object(
+ update_chromeos_llvm_hash, "UpdatePackagesPatchMetadataFile"
+ )
+ @mock.patch.object(
+ update_chromeos_llvm_hash, "StagePatchMetadataFileForCommit"
+ )
+ def testSuccessfullyUpdatedPackages(
+ self,
+ mock_stage_patch_file,
+ mock_update_package_metadata_file,
+ mock_delete_repo,
+ mock_upload_changes,
+ mock_uprev_symlink,
+ mock_update_llvm_next,
+ mock_create_repo,
+ mock_create_path_dict,
+ mock_llvm_version,
+ mock_mask_contains,
+ ):
+
+ path_to_package_dir = "/some/path/to/chroot/src/path/to"
+ abs_path_to_package = os.path.join(
+ path_to_package_dir, "package.ebuild"
+ )
+ symlink_path_to_package = os.path.join(
+ path_to_package_dir, "package-r1.ebuild"
+ )
+
+        # Test function to simulate 'CreateBranch' when it successfully
+        # creates the branch for the changes to be made to the ebuild files.
+ def SuccessfullyCreateBranchForChanges(_, branch):
+ self.assertEqual(branch, "update-LLVM_NEXT_HASH-a123testhash5")
+
+        # Test function to simulate 'UpdateEbuildLLVMHash' after a successful
+        # update of 'LLVM_NEXT_HASH' in the ebuild file.
+ def SuccessfullyUpdatedLLVMHash(ebuild_path, _, git_hash, svn_version):
+ self.assertEqual(
+ ebuild_path, "/some/path/to/chroot/src/path/to/package.ebuild"
+ )
+ self.assertEqual(git_hash, "a123testhash5")
+ self.assertEqual(svn_version, 1000)
+
+        # Test function to simulate 'UprevEbuildSymlink' when it successfully
+        # increments the revision number by 1.
+ def SuccessfullyUprevedEbuildSymlink(symlink_path):
+ self.assertEqual(
+ symlink_path,
+ "/some/path/to/chroot/src/path/to/package-r1.ebuild",
+ )
+
+        # Test function to simulate 'UpdatePackagesPatchMetadataFile()' when
+        # the patch results contain a disabled patch in 'disable_patches' mode.
+ def RetrievedPatchResults(chroot_path, svn_version, packages, mode):
+
+ self.assertEqual(chroot_path, Path("/some/path/to/chroot"))
+ self.assertEqual(svn_version, 1000)
+ self.assertListEqual(packages, ["path/to"])
+ self.assertEqual(mode, failure_modes.FailureModes.DISABLE_PATCHES)
+
+ patch_metadata_file = "PATCHES.json"
+ PatchInfo = collections.namedtuple(
+ "PatchInfo",
+ [
+ "applied_patches",
+ "failed_patches",
+ "non_applicable_patches",
+ "disabled_patches",
+ "removed_patches",
+ "modified_metadata",
+ ],
+ )
+
+ package_patch_info = PatchInfo(
+ applied_patches=["fix_display.patch"],
+ failed_patches=["fix_stdout.patch"],
+ non_applicable_patches=[],
+ disabled_patches=["fix_stdout.patch"],
+ removed_patches=[],
+ modified_metadata="/abs/path/to/filesdir/%s"
+ % patch_metadata_file,
+ )
+
+ package_info_dict = {"path/to": package_patch_info._asdict()}
+
+ # Returns a dictionary where the key is the package and the value is a
+ # dictionary that contains information about the package's patch results
+ # produced by the patch manager.
+ return package_info_dict
+
+        # Test function to simulate 'UploadChanges()' when it successfully
+        # creates a commit for the changes made to the packages and their
+        # patches and retrieves the commit's change list.
+ def SuccessfullyUploadedChanges(*args):
+ assert len(args) == 3
+ commit_url = "https://some_name/path/to/commit/+/12345"
+ return git.CommitContents(url=commit_url, cl_number=12345)
+
+ test_package_path_dict = {symlink_path_to_package: abs_path_to_package}
+
+        # Simulate behavior of 'CreatePathDictionaryFromPackages()' when it
+        # successfully creates a dictionary where the key is the absolute path
+        # to the package's symlink and the value is the absolute path to the
+        # package's ebuild.
+ mock_create_path_dict.return_value = test_package_path_dict
+
+ # Use test function to simulate behavior.
+ mock_create_repo.side_effect = SuccessfullyCreateBranchForChanges
+ mock_update_llvm_next.side_effect = SuccessfullyUpdatedLLVMHash
+ mock_uprev_symlink.side_effect = SuccessfullyUprevedEbuildSymlink
+ mock_update_package_metadata_file.side_effect = RetrievedPatchResults
+ mock_upload_changes.side_effect = SuccessfullyUploadedChanges
+ mock_llvm_version.return_value = "1234"
+        mock_mask_contains.return_value = None
+
+ packages_to_update = ["test-packages/package1"]
+ llvm_variant = update_chromeos_llvm_hash.LLVMVariant.next
+ git_hash = "a123testhash5"
+ svn_version = 1000
+ chroot_path = Path("/some/path/to/chroot")
+ git_hash_source = "tot"
+ branch = "update-LLVM_NEXT_HASH-a123testhash5"
+ extra_commit_msg = "\ncommit-message-end"
+
+ change_list = update_chromeos_llvm_hash.UpdatePackages(
+ packages=packages_to_update,
+ manifest_packages=[],
+ llvm_variant=llvm_variant,
+ git_hash=git_hash,
+ svn_version=svn_version,
+ chroot_path=chroot_path,
+ mode=failure_modes.FailureModes.DISABLE_PATCHES,
+ git_hash_source=git_hash_source,
+ extra_commit_msg=extra_commit_msg,
+ )
+
+ self.assertEqual(
+ change_list.url, "https://some_name/path/to/commit/+/12345"
+ )
+
+ self.assertEqual(change_list.cl_number, 12345)
+
+ mock_create_path_dict.assert_called_once_with(
+ chroot_path, packages_to_update
+ )
+
+ mock_create_repo.assert_called_once_with(path_to_package_dir, branch)
+
+ mock_update_llvm_next.assert_called_once_with(
+ abs_path_to_package, llvm_variant, git_hash, svn_version
+ )
+
+ mock_uprev_symlink.assert_called_once_with(symlink_path_to_package)
+
+ mock_mask_contains.assert_called_once_with(chroot_path, git_hash)
+
+ expected_commit_messages = [
+ "llvm-next/tot: upgrade to a123testhash5 (r1000)\n",
+ "The following packages have been updated:",
+ "path/to",
+ "\nFor the package path/to:",
+ "The patch metadata file PATCHES.json was modified",
+ "The following patches were disabled:",
+ "fix_stdout.patch",
+ "\ncommit-message-end",
+ ]
+
+ mock_update_package_metadata_file.assert_called_once()
+
+ mock_stage_patch_file.assert_called_once_with(
+ "/abs/path/to/filesdir/PATCHES.json"
+ )
+
+ mock_upload_changes.assert_called_once_with(
+ path_to_package_dir, branch, expected_commit_messages
+ )
+
+ mock_delete_repo.assert_called_once_with(path_to_package_dir, branch)
+
+ @mock.patch.object(chroot, "VerifyOutsideChroot")
+ @mock.patch.object(get_llvm_hash, "GetLLVMHashAndVersionFromSVNOption")
+ @mock.patch.object(update_chromeos_llvm_hash, "UpdatePackages")
+ def testMainDefaults(
+ self, mock_update_packages, mock_gethash, mock_outside_chroot
+ ):
+ git_hash = "1234abcd"
+ svn_version = 5678
+ mock_gethash.return_value = (git_hash, svn_version)
+ argv = [
+ "./update_chromeos_llvm_hash_unittest.py",
+ "--llvm_version",
+ "google3",
+ ]
+
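+        # Run main() with only --llvm_version; all other options use defaults.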
+        with mock.patch.object(sys, "argv", argv):
+ update_chromeos_llvm_hash.main()
+
+ expected_packages = set(update_chromeos_llvm_hash.DEFAULT_PACKAGES)
+ expected_manifest_packages = set(
+ update_chromeos_llvm_hash.DEFAULT_MANIFEST_PACKAGES,
+ )
+ expected_llvm_variant = update_chromeos_llvm_hash.LLVMVariant.current
+ expected_chroot = update_chromeos_llvm_hash.defaultCrosRoot()
+ mock_update_packages.assert_called_once_with(
+ packages=expected_packages,
+ manifest_packages=expected_manifest_packages,
+ llvm_variant=expected_llvm_variant,
+ git_hash=git_hash,
+ svn_version=svn_version,
+ chroot_path=expected_chroot,
+ mode=failure_modes.FailureModes.FAIL,
+ git_hash_source="google3",
+ extra_commit_msg=None,
+ )
+ mock_outside_chroot.assert_called()
+
+ @mock.patch.object(chroot, "VerifyOutsideChroot")
+ @mock.patch.object(get_llvm_hash, "GetLLVMHashAndVersionFromSVNOption")
+ @mock.patch.object(update_chromeos_llvm_hash, "UpdatePackages")
+ def testMainLlvmNext(
+ self, mock_update_packages, mock_gethash, mock_outside_chroot
+ ):
+ git_hash = "1234abcd"
+ svn_version = 5678
+ mock_gethash.return_value = (git_hash, svn_version)
+ argv = [
+ "./update_chromeos_llvm_hash_unittest.py",
+ "--llvm_version",
+ "google3",
+ "--is_llvm_next",
+ ]
+
+        with mock.patch.object(sys, "argv", argv):
+ update_chromeos_llvm_hash.main()
+
+ expected_packages = set(update_chromeos_llvm_hash.DEFAULT_PACKAGES)
+ expected_llvm_variant = update_chromeos_llvm_hash.LLVMVariant.next
+ expected_chroot = update_chromeos_llvm_hash.defaultCrosRoot()
+ # llvm-next upgrade does not update manifest by default.
+ mock_update_packages.assert_called_once_with(
+ packages=expected_packages,
+ manifest_packages=set(),
+ llvm_variant=expected_llvm_variant,
+ git_hash=git_hash,
+ svn_version=svn_version,
+ chroot_path=expected_chroot,
+ mode=failure_modes.FailureModes.FAIL,
+ git_hash_source="google3",
+ extra_commit_msg=None,
+ )
+ mock_outside_chroot.assert_called()
+
+ @mock.patch.object(chroot, "VerifyOutsideChroot")
+ @mock.patch.object(get_llvm_hash, "GetLLVMHashAndVersionFromSVNOption")
+ @mock.patch.object(update_chromeos_llvm_hash, "UpdatePackages")
+ def testMainAllArgs(
+ self, mock_update_packages, mock_gethash, mock_outside_chroot
+ ):
+ packages_to_update = "test-packages/package1,test-libs/lib1"
+ manifest_packages = "test-libs/lib1,test-libs/lib2"
+ failure_mode = failure_modes.FailureModes.REMOVE_PATCHES
+ chroot_path = Path("/some/path/to/chroot")
+ llvm_ver = 435698
+ git_hash = "1234abcd"
+ svn_version = 5678
+ mock_gethash.return_value = (git_hash, svn_version)
+
+ argv = [
+ "./update_chromeos_llvm_hash_unittest.py",
+ "--llvm_version",
+ str(llvm_ver),
+ "--is_llvm_next",
+ "--chroot_path",
+ str(chroot_path),
+ "--update_packages",
+ packages_to_update,
+ "--manifest_packages",
+ manifest_packages,
+ "--failure_mode",
+ failure_mode.value,
+ "--patch_metadata_file",
+ "META.json",
+ ]
+
+        with mock.patch.object(sys, "argv", argv):
+ update_chromeos_llvm_hash.main()
+
+ expected_packages = {"test-packages/package1", "test-libs/lib1"}
+ expected_manifest_packages = {"test-libs/lib1", "test-libs/lib2"}
+ expected_llvm_variant = update_chromeos_llvm_hash.LLVMVariant.next
+ mock_update_packages.assert_called_once_with(
+ packages=expected_packages,
+ manifest_packages=expected_manifest_packages,
+ llvm_variant=expected_llvm_variant,
+ git_hash=git_hash,
+ svn_version=svn_version,
+ chroot_path=chroot_path,
+ mode=failure_mode,
+ git_hash_source=llvm_ver,
+ extra_commit_msg=None,
+ )
+ mock_outside_chroot.assert_called()
+
+ @mock.patch.object(subprocess, "check_output", return_value=None)
+ @mock.patch.object(get_llvm_hash, "GetLLVMMajorVersion")
+ def testEnsurePackageMaskContainsExisting(
+ self, mock_llvm_version, mock_git_add
+ ):
+ chroot_path = "absolute/path/to/chroot"
+ git_hash = "badf00d"
+ mock_llvm_version.return_value = "1234"
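+        # The mask file already lists '=sys-devel/llvm-1234.0_pre*', so no
+        # write should occur.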
+ with mock.patch(
+ "update_chromeos_llvm_hash.open",
+ mock.mock_open(read_data="\n=sys-devel/llvm-1234.0_pre*\n"),
+ create=True,
+ ) as mock_file:
+ update_chromeos_llvm_hash.EnsurePackageMaskContains(
+ chroot_path, git_hash
+ )
+ handle = mock_file()
+ handle.write.assert_not_called()
+ mock_llvm_version.assert_called_once_with(git_hash)
+
+ overlay_dir = (
+ "absolute/path/to/chroot/src/third_party/chromiumos-overlay"
+ )
+ mask_path = overlay_dir + "/profiles/targets/chromeos/package.mask"
+ mock_git_add.assert_called_once_with(
+ ["git", "-C", overlay_dir, "add", mask_path]
+ )
+
+ @mock.patch.object(subprocess, "check_output", return_value=None)
+ @mock.patch.object(get_llvm_hash, "GetLLVMMajorVersion")
+ def testEnsurePackageMaskContainsNotExisting(
+ self, mock_llvm_version, mock_git_add
+ ):
+ chroot_path = "absolute/path/to/chroot"
+ git_hash = "badf00d"
+ mock_llvm_version.return_value = "1234"
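+        # The mask file has no entry for major version 1234, so one should be
+        # appended.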
+ with mock.patch(
+ "update_chromeos_llvm_hash.open",
+ mock.mock_open(read_data="nothing relevant"),
+ create=True,
+ ) as mock_file:
+ update_chromeos_llvm_hash.EnsurePackageMaskContains(
+ chroot_path, git_hash
+ )
+ handle = mock_file()
+ handle.write.assert_called_once_with(
+ "=sys-devel/llvm-1234.0_pre*\n"
+ )
+ mock_llvm_version.assert_called_once_with(git_hash)
+
+ overlay_dir = (
+ "absolute/path/to/chroot/src/third_party/chromiumos-overlay"
+ )
+ mask_path = overlay_dir + "/profiles/targets/chromeos/package.mask"
+ mock_git_add.assert_called_once_with(
+ ["git", "-C", overlay_dir, "add", mask_path]
+ )
+
+
+if __name__ == "__main__":
+ unittest.main()
diff --git a/llvm_tools/update_packages_and_run_tests.py b/llvm_tools/update_packages_and_run_tests.py
index 2e4a9058..dc14b6de 100755
--- a/llvm_tools/update_packages_and_run_tests.py
+++ b/llvm_tools/update_packages_and_run_tests.py
@@ -1,12 +1,11 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
-# Copyright 2019 The Chromium OS Authors. All rights reserved.
+# Copyright 2019 The ChromiumOS Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Runs a tryjob/tryjobs after updating the packages."""
-from __future__ import print_function
import argparse
import datetime
@@ -19,466 +18,508 @@ import failure_modes
import get_llvm_hash
import update_chromeos_llvm_hash
-VALID_CQ_TRYBOTS = ['llvm', 'llvm-next', 'llvm-tot']
+
+VALID_CQ_TRYBOTS = ["llvm", "llvm-next", "llvm-tot"]
def GetCommandLineArgs():
- """Parses the command line for the command line arguments.
-
- Returns:
- The log level to use when retrieving the LLVM hash or google3 LLVM version,
- the chroot path to use for executing chroot commands,
- a list of a package or packages to update their LLVM next hash,
- and the LLVM version to use when retrieving the LLVM hash.
- """
-
- # Default path to the chroot if a path is not specified.
- cros_root = os.path.expanduser('~')
- cros_root = os.path.join(cros_root, 'chromiumos')
-
- # Create parser and add optional command-line arguments.
- parser = argparse.ArgumentParser(
- description='Update an LLVM hash of packages and run tests.')
-
- # Add argument for other change lists that want to run alongside the tryjob
- # which has a change list of updating a package's git hash.
- parser.add_argument(
- '--extra_change_lists',
- type=int,
- nargs='+',
- default=[],
- help='change lists that would like to be run alongside the change list '
- 'of updating the packages')
-
- # Add argument for a specific chroot path.
- parser.add_argument('--chroot_path',
- default=cros_root,
- help='the path to the chroot (default: %(default)s)')
-
- # Add argument to choose between llvm and llvm-next.
- parser.add_argument(
- '--is_llvm_next',
- action='store_true',
- help='which llvm hash to update. Update LLVM_NEXT_HASH if specified. '
- 'Otherwise, update LLVM_HASH')
-
- # Add argument for the absolute path to the file that contains information on
- # the previous tested svn version.
- parser.add_argument(
- '--last_tested',
- help='the absolute path to the file that contains the last tested '
- 'arguments.')
-
- # Add argument for the LLVM version to use.
- parser.add_argument('--llvm_version',
- type=get_llvm_hash.IsSvnOption,
- required=True,
- help='which git hash of LLVM to find '
- '{google3, ToT, <svn_version>} '
- '(default: finds the git hash of the google3 LLVM '
- 'version)')
-
- # Add argument to add reviewers for the created CL.
- parser.add_argument('--reviewers',
- nargs='+',
- default=[],
- help='The reviewers for the package update changelist')
-
- # Add argument for whether to display command contents to `stdout`.
- parser.add_argument('--verbose',
- action='store_true',
- help='display contents of a command to the terminal '
- '(default: %(default)s)')
-
- subparsers = parser.add_subparsers(dest='subparser_name')
- subparser_names = []
- # Testing with the tryjobs.
- tryjob_subparser = subparsers.add_parser('tryjobs')
- subparser_names.append('tryjobs')
- tryjob_subparser.add_argument('--builders',
- required=True,
- nargs='+',
- default=[],
- help='builders to use for the tryjob testing')
-
- # Add argument for custom options for the tryjob.
- tryjob_subparser.add_argument('--options',
- required=False,
- nargs='+',
- default=[],
- help='options to use for the tryjob testing')
-
- # Testing with the recipe builders
- recipe_subparser = subparsers.add_parser('recipe')
- subparser_names.append('recipe')
- recipe_subparser.add_argument('--options',
- required=False,
- nargs='+',
- default=[],
- help='options passed to the recipe builders')
-
- recipe_subparser.add_argument('--builders',
- required=True,
- nargs='+',
- default=[],
- help='recipe builders to launch')
-
- # Testing with CQ.
- cq_subparser = subparsers.add_parser('cq')
- subparser_names.append('cq')
-
- # Add argument for specify a cq trybot to test along with other cq builders
- # e.g. llvm, llvm-next or llvm-tot
- cq_subparser.add_argument(
- '--cq_trybot',
- choices=VALID_CQ_TRYBOTS,
- help='include the trybot to test together with other cq builders '
- 'available: %(choices)s')
-
- args_output = parser.parse_args()
-
- if args_output.subparser_name not in subparser_names:
- parser.error('one of %s must be specified' % subparser_names)
-
- return args_output
+ """Parses the command line for the command line arguments.
+
+ Returns:
+ The log level to use when retrieving the LLVM hash or google3 LLVM version,
+ the chroot path to use for executing chroot commands,
+ a list of a package or packages to update their LLVM next hash,
+ and the LLVM version to use when retrieving the LLVM hash.
+ """
+
+ # Default path to the chroot if a path is not specified.
+ cros_root = os.path.expanduser("~")
+ cros_root = os.path.join(cros_root, "chromiumos")
+
+ # Create parser and add optional command-line arguments.
+ parser = argparse.ArgumentParser(
+ description="Update an LLVM hash of packages and run tests."
+ )
+
+    # Add argument for other change lists to run alongside the tryjob for the
+    # change list that updates a package's git hash.
+ parser.add_argument(
+ "--extra_change_lists",
+ type=int,
+ nargs="+",
+ default=[],
+ help="change lists that would like to be run alongside the change list "
+ "of updating the packages",
+ )
+
+ # Add argument for a specific chroot path.
+ parser.add_argument(
+ "--chroot_path",
+ default=cros_root,
+ help="the path to the chroot (default: %(default)s)",
+ )
+
+ # Add argument to choose between llvm and llvm-next.
+ parser.add_argument(
+ "--is_llvm_next",
+ action="store_true",
+ help="which llvm hash to update. Update LLVM_NEXT_HASH if specified. "
+ "Otherwise, update LLVM_HASH",
+ )
+
+    # Add argument for the absolute path to the file that contains information
+    # on the previously tested svn version.
+ parser.add_argument(
+ "--last_tested",
+ help="the absolute path to the file that contains the last tested "
+ "arguments.",
+ )
+
+ # Add argument for the LLVM version to use.
+ parser.add_argument(
+ "--llvm_version",
+ type=get_llvm_hash.IsSvnOption,
+ required=True,
+ help="which git hash of LLVM to find "
+ "{google3, ToT, <svn_version>} "
+ "(default: finds the git hash of the google3 LLVM "
+ "version)",
+ )
+
+ # Add argument to add reviewers for the created CL.
+ parser.add_argument(
+ "--reviewers",
+ nargs="+",
+ default=[],
+ help="The reviewers for the package update changelist",
+ )
+
+ # Add argument for whether to display command contents to `stdout`.
+ parser.add_argument(
+ "--verbose",
+ action="store_true",
+ help="display contents of a command to the terminal "
+ "(default: %(default)s)",
+ )
+
+ subparsers = parser.add_subparsers(dest="subparser_name")
+ subparser_names = []
+ # Testing with the tryjobs.
+ tryjob_subparser = subparsers.add_parser("tryjobs")
+ subparser_names.append("tryjobs")
+ tryjob_subparser.add_argument(
+ "--builders",
+ required=True,
+ nargs="+",
+ default=[],
+ help="builders to use for the tryjob testing",
+ )
+
+ # Add argument for custom options for the tryjob.
+ tryjob_subparser.add_argument(
+ "--options",
+ required=False,
+ nargs="+",
+ default=[],
+ help="options to use for the tryjob testing",
+ )
+
+ # Testing with the recipe builders
+ recipe_subparser = subparsers.add_parser("recipe")
+ subparser_names.append("recipe")
+ recipe_subparser.add_argument(
+ "--options",
+ required=False,
+ nargs="+",
+ default=[],
+ help="options passed to the recipe builders",
+ )
+
+ recipe_subparser.add_argument(
+ "--builders",
+ required=True,
+ nargs="+",
+ default=[],
+ help="recipe builders to launch",
+ )
+
+ # Testing with CQ.
+ cq_subparser = subparsers.add_parser("cq")
+ subparser_names.append("cq")
+
+    # Add argument for specifying a cq trybot to test along with other cq
+    # builders, e.g. llvm, llvm-next or llvm-tot.
+ cq_subparser.add_argument(
+ "--cq_trybot",
+ choices=VALID_CQ_TRYBOTS,
+ help="include the trybot to test together with other cq builders "
+ "available: %(choices)s",
+ )
+
+ args_output = parser.parse_args()
+
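+    # A testing subcommand ('tryjobs', 'recipe', or 'cq') is required.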
+ if args_output.subparser_name not in subparser_names:
+ parser.error("one of %s must be specified" % subparser_names)
+
+ return args_output
def UnchangedSinceLastRun(last_tested_file, arg_dict):
- """Gets the arguments used for last run
+ """Gets the arguments used for last run
- Args:
- last_tested_file: The absolute path to the file that contains the
- arguments for the last run.
- arg_dict: The arguments used for this run.
+ Args:
+ last_tested_file: The absolute path to the file that contains the
+ arguments for the last run.
+ arg_dict: The arguments used for this run.
- Returns:
- Return true if the arguments used for last run exist and are the
- same as the arguments used for this run. Otherwise return false.
- """
+ Returns:
+ Return true if the arguments used for last run exist and are the
+ same as the arguments used for this run. Otherwise return false.
+ """
- if not last_tested_file:
- return False
+ if not last_tested_file:
+ return False
- # Get the last tested svn version if the file exists.
- last_arg_dict = None
- try:
- with open(last_tested_file) as f:
- last_arg_dict = json.load(f)
+ # Get the last tested svn version if the file exists.
+ last_arg_dict = None
+ try:
+ with open(last_tested_file) as f:
+ last_arg_dict = json.load(f)
- except (IOError, ValueError):
- return False
+ except (IOError, ValueError):
+ return False
- return arg_dict == last_arg_dict
+ return arg_dict == last_arg_dict
def AddReviewers(cl, reviewers, chroot_path):
- """Add reviewers for the created CL."""
+ """Add reviewers for the created CL."""
- gerrit_abs_path = os.path.join(chroot_path, 'chromite/bin/gerrit')
- for reviewer in reviewers:
- cmd = [gerrit_abs_path, 'reviewers', str(cl), reviewer]
+ gerrit_abs_path = os.path.join(chroot_path, "chromite/bin/gerrit")
+ for reviewer in reviewers:
+ cmd = [gerrit_abs_path, "reviewers", str(cl), reviewer]
- subprocess.check_output(cmd)
+ subprocess.check_output(cmd)
def AddLinksToCL(tests, cl, chroot_path):
- """Adds the test link(s) to the CL as a comment."""
+ """Adds the test link(s) to the CL as a comment."""
- # NOTE: Invoking `cros_sdk` does not make each tryjob link appear on its own
- # line, so invoking the `gerrit` command directly instead of using `cros_sdk`
- # to do it for us.
- #
- # FIXME: Need to figure out why `cros_sdk` does not add each tryjob link as a
- # newline.
- gerrit_abs_path = os.path.join(chroot_path, 'chromite/bin/gerrit')
+ # NOTE: Invoking `cros_sdk` does not make each tryjob link appear on its own
+ # line, so invoking the `gerrit` command directly instead of using `cros_sdk`
+ # to do it for us.
+ #
+ # FIXME: Need to figure out why `cros_sdk` does not add each tryjob link as a
+ # newline.
+ gerrit_abs_path = os.path.join(chroot_path, "chromite/bin/gerrit")
- links = ['Started the following tests:']
- links.extend(test['link'] for test in tests)
+ links = ["Started the following tests:"]
+ links.extend(test["link"] for test in tests)
- add_message_cmd = [gerrit_abs_path, 'message', str(cl), '\n'.join(links)]
+ add_message_cmd = [gerrit_abs_path, "message", str(cl), "\n".join(links)]
- subprocess.check_output(add_message_cmd)
+ subprocess.check_output(add_message_cmd)
# Testing with tryjobs
def GetCurrentTimeInUTC():
- """Returns the current time via `datetime.datetime.utcnow()`."""
- return datetime.datetime.utcnow()
+ """Returns the current time via `datetime.datetime.utcnow()`."""
+ return datetime.datetime.utcnow()
def GetTryJobCommand(change_list, extra_change_lists, options, builder):
- """Constructs the 'tryjob' command.
+ """Constructs the 'tryjob' command.
- Args:
- change_list: The CL obtained from updating the packages.
- extra_change_lists: Extra change lists that would like to be run alongside
- the change list of updating the packages.
- options: Options to be passed into the tryjob command.
- builder: The builder to be passed into the tryjob command.
+ Args:
+ change_list: The CL obtained from updating the packages.
+ extra_change_lists: Extra change lists that would like to be run alongside
+ the change list of updating the packages.
+ options: Options to be passed into the tryjob command.
+ builder: The builder to be passed into the tryjob command.
- Returns:
- The 'tryjob' command with the change list of updating the packages and
- any extra information that was passed into the command line.
- """
+ Returns:
+ The 'tryjob' command with the change list of updating the packages and
+ any extra information that was passed into the command line.
+ """
- tryjob_cmd = ['cros', 'tryjob', '--yes', '--json', '-g', '%d' % change_list]
+ tryjob_cmd = ["cros", "tryjob", "--yes", "--json", "-g", "%d" % change_list]
- if extra_change_lists:
- for extra_cl in extra_change_lists:
- tryjob_cmd.extend(['-g', '%d' % extra_cl])
+ if extra_change_lists:
+ for extra_cl in extra_change_lists:
+ tryjob_cmd.extend(["-g", "%d" % extra_cl])
- if options:
- tryjob_cmd.extend('--%s' % option for option in options)
+ if options:
+ tryjob_cmd.extend("--%s" % option for option in options)
- tryjob_cmd.append(builder)
+ tryjob_cmd.append(builder)
- return tryjob_cmd
+ return tryjob_cmd
def RunTryJobs(cl_number, extra_change_lists, options, builders, chroot_path):
- """Runs a tryjob/tryjobs.
+ """Runs a tryjob/tryjobs.
- Args:
- cl_number: The CL created by updating the packages.
- extra_change_lists: Any extra change lists that would run alongside the CL
- that was created by updating the packages ('cl_number').
- options: Any options to be passed into the 'tryjob' command.
- builders: All the builders to run the 'tryjob' with.
- chroot_path: The absolute path to the chroot.
+ Args:
+ cl_number: The CL created by updating the packages.
+ extra_change_lists: Any extra change lists that would run alongside the CL
+ that was created by updating the packages ('cl_number').
+ options: Any options to be passed into the 'tryjob' command.
+ builders: All the builders to run the 'tryjob' with.
+ chroot_path: The absolute path to the chroot.
- Returns:
- A list that contains stdout contents of each tryjob, where stdout is
- information (a hashmap) about the tryjob. The hashmap also contains stderr
- if there was an error when running a tryjob.
+ Returns:
+ A list that contains stdout contents of each tryjob, where stdout is
+ information (a hashmap) about the tryjob. The hashmap also contains stderr
+ if there was an error when running a tryjob.
- Raises:
- ValueError: Failed to submit a tryjob.
- """
+ Raises:
+ ValueError: Failed to submit a tryjob.
+ """
- # Contains the results of each builder.
- tests = []
+ # Contains the results of each builder.
+ tests = []
- # Run tryjobs with the change list number obtained from updating the
- # packages and append additional changes lists and options obtained from the
- # command line.
- for builder in builders:
- cmd = GetTryJobCommand(cl_number, extra_change_lists, options, builder)
+    # Run tryjobs with the change list number obtained from updating the
+    # packages, appending any additional change lists and options obtained
+    # from the command line.
+ for builder in builders:
+ cmd = GetTryJobCommand(cl_number, extra_change_lists, options, builder)
- out = subprocess.check_output(cmd, cwd=chroot_path, encoding='utf-8')
+ out = subprocess.check_output(cmd, cwd=chroot_path, encoding="utf-8")
- test_output = json.loads(out)
+ test_output = json.loads(out)
- buildbucket_id = int(test_output[0]['id'])
+ buildbucket_id = int(test_output[0]["id"])
- tests.append({
- 'launch_time': str(GetCurrentTimeInUTC()),
- 'link': 'http://ci.chromium.org/b/%s' % buildbucket_id,
- 'buildbucket_id': buildbucket_id,
- 'extra_cls': extra_change_lists,
- 'options': options,
- 'builder': [builder]
- })
+ tests.append(
+ {
+ "launch_time": str(GetCurrentTimeInUTC()),
+ "link": "http://ci.chromium.org/b/%s" % buildbucket_id,
+ "buildbucket_id": buildbucket_id,
+ "extra_cls": extra_change_lists,
+ "options": options,
+ "builder": [builder],
+ }
+ )
- AddLinksToCL(tests, cl_number, chroot_path)
+ AddLinksToCL(tests, cl_number, chroot_path)
- return tests
+ return tests
-def StartRecipeBuilders(cl_number, extra_change_lists, options, builders,
- chroot_path):
- """Launch recipe builders.
+def StartRecipeBuilders(
+ cl_number, extra_change_lists, options, builders, chroot_path
+):
+ """Launch recipe builders.
- Args:
- cl_number: The CL created by updating the packages.
- extra_change_lists: Any extra change lists that would run alongside the CL
- that was created by updating the packages ('cl_number').
- options: Any options to be passed into the 'tryjob' command.
- builders: All the builders to run the 'tryjob' with.
- chroot_path: The absolute path to the chroot.
+ Args:
+ cl_number: The CL created by updating the packages.
+ extra_change_lists: Any extra change lists that would run alongside the CL
+ that was created by updating the packages ('cl_number').
+ options: Any options to be passed into the 'tryjob' command.
+ builders: All the builders to run the 'tryjob' with.
+ chroot_path: The absolute path to the chroot.
- Returns:
- A list that contains stdout contents of each builder, where stdout is
- information (a hashmap) about the tryjob. The hashmap also contains stderr
- if there was an error when running a tryjob.
+ Returns:
+ A list that contains stdout contents of each builder, where stdout is
+ information (a hashmap) about the tryjob. The hashmap also contains stderr
+ if there was an error when running a tryjob.
- Raises:
- ValueError: Failed to start a builder.
- """
+ Raises:
+ ValueError: Failed to start a builder.
+ """
- # Contains the results of each builder.
- tests = []
+ # Contains the results of each builder.
+ tests = []
- # Launch a builders with the change list number obtained from updating the
- # packages and append additional changes lists and options obtained from the
- # command line.
- for builder in builders:
- cmd = ['bb', 'add', '-json']
+    # Launch the builders with the change list number obtained from updating
+    # the packages, appending any additional change lists and options
+    # obtained from the command line.
+ for builder in builders:
+ cmd = ["bb", "add", "-json"]
- if cl_number:
- cmd.extend(['-cl', 'crrev.com/c/%d' % cl_number])
+ if cl_number:
+ cmd.extend(["-cl", "crrev.com/c/%d" % cl_number])
- if extra_change_lists:
- for cl in extra_change_lists:
- cmd.extend(['-cl', 'crrev.com/c/%d' % cl])
+ if extra_change_lists:
+ for cl in extra_change_lists:
+ cmd.extend(["-cl", "crrev.com/c/%d" % cl])
- if options:
- cmd.extend(options)
+ if options:
+ cmd.extend(options)
- cmd.append(builder)
+ cmd.append(builder)
- out = subprocess.check_output(cmd, cwd=chroot_path, encoding='utf-8')
+ out = subprocess.check_output(cmd, cwd=chroot_path, encoding="utf-8")
- test_output = json.loads(out)
+ test_output = json.loads(out)
- tests.append({
- 'launch_time': test_output['createTime'],
- 'link': 'http://ci.chromium.org/b/%s' % test_output['id'],
- 'buildbucket_id': test_output['id'],
- 'extra_cls': extra_change_lists,
- 'options': options,
- 'builder': [builder]
- })
+ tests.append(
+ {
+ "launch_time": test_output["createTime"],
+ "link": "http://ci.chromium.org/b/%s" % test_output["id"],
+ "buildbucket_id": test_output["id"],
+ "extra_cls": extra_change_lists,
+ "options": options,
+ "builder": [builder],
+ }
+ )
- AddLinksToCL(tests, cl_number, chroot_path)
+ AddLinksToCL(tests, cl_number, chroot_path)
- return tests
+ return tests
# Testing with CQ
def GetCQDependString(dependent_cls):
- """Get CQ dependency string e.g. `Cq-Depend: chromium:MM, chromium:NN`."""
+ """Get CQ dependency string e.g. `Cq-Depend: chromium:MM, chromium:NN`."""
- if not dependent_cls:
- return None
+ if not dependent_cls:
+ return None
- # Cq-Depend must start a new paragraph prefixed with "Cq-Depend".
- return '\nCq-Depend: ' + ', '.join(
- ('chromium:%s' % i) for i in dependent_cls)
+ # Cq-Depend must start a new paragraph prefixed with "Cq-Depend".
+ return "\nCq-Depend: " + ", ".join(
+ ("chromium:%s" % i) for i in dependent_cls
+ )
def GetCQIncludeTrybotsString(trybot):
- """Get Cq-Include-Trybots string, for more llvm testings"""
+ """Get Cq-Include-Trybots string, for more llvm testings"""
- if not trybot:
- return None
+ if not trybot:
+ return None
- if trybot not in VALID_CQ_TRYBOTS:
- raise ValueError('%s is not a valid llvm trybot' % trybot)
+ if trybot not in VALID_CQ_TRYBOTS:
+ raise ValueError("%s is not a valid llvm trybot" % trybot)
- # Cq-Include-Trybots must start a new paragraph prefixed
- # with "Cq-Include-Trybots".
- return '\nCq-Include-Trybots:chromeos/cq:cq-%s-orchestrator' % trybot
+ # Cq-Include-Trybots must start a new paragraph prefixed
+ # with "Cq-Include-Trybots".
+ return "\nCq-Include-Trybots:chromeos/cq:cq-%s-orchestrator" % trybot
def StartCQDryRun(cl, dependent_cls, chroot_path):
- """Start CQ dry run for the changelist and dependencies."""
+ """Start CQ dry run for the changelist and dependencies."""
- gerrit_abs_path = os.path.join(chroot_path, 'chromite/bin/gerrit')
+ gerrit_abs_path = os.path.join(chroot_path, "chromite/bin/gerrit")
- cl_list = [cl]
- cl_list.extend(dependent_cls)
+ cl_list = [cl]
+ cl_list.extend(dependent_cls)
- for changes in cl_list:
- cq_dry_run_cmd = [gerrit_abs_path, 'label-cq', str(changes), '1']
+ for changes in cl_list:
+ cq_dry_run_cmd = [gerrit_abs_path, "label-cq", str(changes), "1"]
- subprocess.check_output(cq_dry_run_cmd)
+ subprocess.check_output(cq_dry_run_cmd)
def main():
- """Updates the packages' LLVM hash and run tests.
-
- Raises:
- AssertionError: The script was run inside the chroot.
- """
-
- chroot.VerifyOutsideChroot()
-
- args_output = GetCommandLineArgs()
-
- patch_metadata_file = 'PATCHES.json'
-
- svn_option = args_output.llvm_version
-
- git_hash, svn_version = get_llvm_hash.GetLLVMHashAndVersionFromSVNOption(
- svn_option)
-
- # There is no need to run tryjobs when all the key parameters remain unchanged
- # from last time.
-
- # If --last_tested is specified, check if the current run has the same
- # arguments last time --last_tested is used.
- if args_output.last_tested:
- chroot_file_paths = chroot.GetChrootEbuildPaths(
- args_output.chroot_path, update_chromeos_llvm_hash.DEFAULT_PACKAGES)
- arg_dict = {
- 'svn_version': svn_version,
- 'ebuilds': chroot_file_paths,
- 'extra_cls': args_output.extra_change_lists,
- }
- if args_output.subparser_name in ('tryjobs', 'recipe'):
- arg_dict['builders'] = args_output.builders
- arg_dict['tryjob_options'] = args_output.options
- if UnchangedSinceLastRun(args_output.last_tested, arg_dict):
- print('svn version (%d) matches the last tested svn version in %s' %
- (svn_version, args_output.last_tested))
- return
-
- llvm_variant = update_chromeos_llvm_hash.LLVMVariant.current
- if args_output.is_llvm_next:
- llvm_variant = update_chromeos_llvm_hash.LLVMVariant.next
- update_chromeos_llvm_hash.verbose = args_output.verbose
- extra_commit_msg = None
- if args_output.subparser_name == 'cq':
- cq_depend_msg = GetCQDependString(args_output.extra_change_lists)
- if cq_depend_msg:
- extra_commit_msg = cq_depend_msg
- cq_trybot_msg = GetCQIncludeTrybotsString(args_output.cq_trybot)
- if cq_trybot_msg:
- extra_commit_msg += cq_trybot_msg
-
- change_list = update_chromeos_llvm_hash.UpdatePackages(
- update_chromeos_llvm_hash.DEFAULT_PACKAGES,
- llvm_variant,
- git_hash,
- svn_version,
- args_output.chroot_path,
- patch_metadata_file,
- failure_modes.FailureModes.DISABLE_PATCHES,
- svn_option,
- extra_commit_msg=extra_commit_msg)
-
- AddReviewers(change_list.cl_number, args_output.reviewers,
- args_output.chroot_path)
-
- print('Successfully updated packages to %d' % svn_version)
- print('Gerrit URL: %s' % change_list.url)
- print('Change list number: %d' % change_list.cl_number)
-
- if args_output.subparser_name == 'tryjobs':
- tests = RunTryJobs(change_list.cl_number, args_output.extra_change_lists,
- args_output.options, args_output.builders,
- args_output.chroot_path)
- print('Tests:')
- for test in tests:
- print(test)
- elif args_output.subparser_name == 'recipe':
- tests = StartRecipeBuilders(change_list.cl_number,
- args_output.extra_change_lists,
- args_output.options, args_output.builders,
- args_output.chroot_path)
- print('Tests:')
- for test in tests:
- print(test)
-
- else:
- StartCQDryRun(change_list.cl_number, args_output.extra_change_lists,
- args_output.chroot_path)
-
- # If --last_tested is specified, record the arguments used
- if args_output.last_tested:
- with open(args_output.last_tested, 'w') as f:
- json.dump(arg_dict, f, indent=2)
-
-
-if __name__ == '__main__':
- main()
+ """Updates the packages' LLVM hash and run tests.
+
+ Raises:
+ AssertionError: The script was run inside the chroot.
+ """
+
+ chroot.VerifyOutsideChroot()
+
+ args_output = GetCommandLineArgs()
+
+ svn_option = args_output.llvm_version
+
+ git_hash, svn_version = get_llvm_hash.GetLLVMHashAndVersionFromSVNOption(
+ svn_option
+ )
+
+ # There is no need to run tryjobs when all the key parameters remain unchanged
+ # from last time.
+
+    # If --last_tested is specified, check whether the current run uses the
+    # same arguments as the last run recorded in --last_tested.
+ if args_output.last_tested:
+ chroot_file_paths = chroot.GetChrootEbuildPaths(
+ args_output.chroot_path, update_chromeos_llvm_hash.DEFAULT_PACKAGES
+ )
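+        # These values identify a run; if they match the recorded ones, the
+        # script returns early.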
+ arg_dict = {
+ "svn_version": svn_version,
+ "ebuilds": chroot_file_paths,
+ "extra_cls": args_output.extra_change_lists,
+ }
+ if args_output.subparser_name in ("tryjobs", "recipe"):
+ arg_dict["builders"] = args_output.builders
+ arg_dict["tryjob_options"] = args_output.options
+ if UnchangedSinceLastRun(args_output.last_tested, arg_dict):
+ print(
+ "svn version (%d) matches the last tested svn version in %s"
+ % (svn_version, args_output.last_tested)
+ )
+ return
+
+ llvm_variant = update_chromeos_llvm_hash.LLVMVariant.current
+ if args_output.is_llvm_next:
+ llvm_variant = update_chromeos_llvm_hash.LLVMVariant.next
+ update_chromeos_llvm_hash.verbose = args_output.verbose
+ extra_commit_msg = None
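+    # For 'cq' testing, append Cq-Depend and Cq-Include-Trybots footers to
+    # the commit message.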
+ if args_output.subparser_name == "cq":
+ cq_depend_msg = GetCQDependString(args_output.extra_change_lists)
+ if cq_depend_msg:
+ extra_commit_msg = cq_depend_msg
+ cq_trybot_msg = GetCQIncludeTrybotsString(args_output.cq_trybot)
+ if cq_trybot_msg:
+ extra_commit_msg += cq_trybot_msg
+
+ change_list = update_chromeos_llvm_hash.UpdatePackages(
+ packages=update_chromeos_llvm_hash.DEFAULT_PACKAGES,
+ manifest_packages=[],
+ llvm_variant=llvm_variant,
+ git_hash=git_hash,
+ svn_version=svn_version,
+ chroot_path=args_output.chroot_path,
+ mode=failure_modes.FailureModes.DISABLE_PATCHES,
+ git_hash_source=svn_option,
+ extra_commit_msg=extra_commit_msg,
+ )
+
+ AddReviewers(
+ change_list.cl_number, args_output.reviewers, args_output.chroot_path
+ )
+
+ print("Successfully updated packages to %d" % svn_version)
+ print("Gerrit URL: %s" % change_list.url)
+ print("Change list number: %d" % change_list.cl_number)
+
+ if args_output.subparser_name == "tryjobs":
+ tests = RunTryJobs(
+ change_list.cl_number,
+ args_output.extra_change_lists,
+ args_output.options,
+ args_output.builders,
+ args_output.chroot_path,
+ )
+ print("Tests:")
+ for test in tests:
+ print(test)
+ elif args_output.subparser_name == "recipe":
+ tests = StartRecipeBuilders(
+ change_list.cl_number,
+ args_output.extra_change_lists,
+ args_output.options,
+ args_output.builders,
+ args_output.chroot_path,
+ )
+ print("Tests:")
+ for test in tests:
+ print(test)
+
+ else:
+ StartCQDryRun(
+ change_list.cl_number,
+ args_output.extra_change_lists,
+ args_output.chroot_path,
+ )
+
+ # If --last_tested is specified, record the arguments used
+ if args_output.last_tested:
+ with open(args_output.last_tested, "w") as f:
+ json.dump(arg_dict, f, indent=2)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/llvm_tools/update_packages_and_run_tests_unittest.py b/llvm_tools/update_packages_and_run_tests_unittest.py
index 11f2b7f8..fc65749f 100755
--- a/llvm_tools/update_packages_and_run_tests_unittest.py
+++ b/llvm_tools/update_packages_and_run_tests_unittest.py
@@ -1,12 +1,11 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
-# Copyright 2019 The Chromium OS Authors. All rights reserved.
+# Copyright 2019 The ChromiumOS Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Unittests for running tests after updating packages."""
-from __future__ import print_function
import json
import subprocess
@@ -23,427 +22,521 @@ import update_packages_and_run_tests
# Testing with tryjobs.
class UpdatePackagesAndRunTryjobsTest(unittest.TestCase):
- """Unittests when running tryjobs after updating packages."""
-
- def testNoLastTestedFile(self):
- self.assertEqual(
- update_packages_and_run_tests.UnchangedSinceLastRun(None, {}), False)
-
- def testEmptyLastTestedFile(self):
- with test_helpers.CreateTemporaryFile() as temp_file:
- self.assertEqual(
- update_packages_and_run_tests.UnchangedSinceLastRun(temp_file, {}),
- False)
-
- def testLastTestedFileDoesNotExist(self):
- # Simulate 'open()' on a lasted tested file that does not exist.
- mock.mock_open(read_data='')
-
- self.assertEqual(
- update_packages_and_run_tests.UnchangedSinceLastRun(
- '/some/file/that/does/not/exist.txt', {}), False)
-
- def testMatchedLastTestedFile(self):
- with test_helpers.CreateTemporaryFile() as last_tested_file:
- arg_dict = {
- 'svn_version': 1234,
- 'ebuilds': [
- '/path/to/package1-r2.ebuild',
- '/path/to/package2/package2-r3.ebuild'
- ],
- 'builders': [
- 'kevin-llvm-next-toolchain-tryjob',
- 'eve-llvm-next-toolchain-tryjob'
- ],
- 'extra_cls': [10, 1],
- 'tryjob_options': ['latest-toolchain', 'hwtest']
- }
-
- with open(last_tested_file, 'w') as f:
- f.write(json.dumps(arg_dict, indent=2))
-
- self.assertEqual(
- update_packages_and_run_tests.UnchangedSinceLastRun(
- last_tested_file, arg_dict), True)
-
- def testGetTryJobCommandWithNoExtraInformation(self):
- change_list = 1234
-
- builder = 'nocturne'
-
- expected_cmd = [
- 'cros', 'tryjob', '--yes', '--json', '-g',
- '%d' % change_list, builder
- ]
-
- self.assertEqual(
- update_packages_and_run_tests.GetTryJobCommand(change_list, None, None,
- builder), expected_cmd)
-
- def testGetTryJobCommandWithExtraInformation(self):
- change_list = 4321
- extra_cls = [1000, 10]
- options = ['option1', 'option2']
- builder = 'kevin'
-
- expected_cmd = [
- 'cros',
- 'tryjob',
- '--yes',
- '--json',
- '-g',
- '%d' % change_list,
- '-g',
- '%d' % extra_cls[0],
- '-g',
- '%d' % extra_cls[1],
- '--%s' % options[0],
- '--%s' % options[1],
- builder,
- ]
-
- self.assertEqual(
- update_packages_and_run_tests.GetTryJobCommand(change_list, extra_cls,
- options, builder),
- expected_cmd)
-
- @mock.patch.object(
- update_packages_and_run_tests,
- 'GetCurrentTimeInUTC',
- return_value='2019-09-09')
- @mock.patch.object(update_packages_and_run_tests, 'AddLinksToCL')
- @mock.patch.object(subprocess, 'check_output')
- def testSuccessfullySubmittedTryJob(self, mock_cmd, mock_add_links_to_cl,
- mock_launch_time):
-
- expected_cmd = [
- 'cros', 'tryjob', '--yes', '--json', '-g',
- '%d' % 900, '-g',
- '%d' % 1200, '--some_option', 'builder1'
- ]
-
- bb_id = '1234'
- url = 'http://ci.chromium.org/b/%s' % bb_id
-
- mock_cmd.return_value = json.dumps([{'id': bb_id, 'url': url}])
-
- chroot_path = '/some/path/to/chroot'
- cl = 900
- extra_cls = [1200]
- options = ['some_option']
- builders = ['builder1']
-
- tests = update_packages_and_run_tests.RunTryJobs(cl, extra_cls, options,
- builders, chroot_path)
-
- expected_tests = [{
- 'launch_time': mock_launch_time.return_value,
- 'link': url,
- 'buildbucket_id': int(bb_id),
- 'extra_cls': extra_cls,
- 'options': options,
- 'builder': builders
- }]
-
- self.assertEqual(tests, expected_tests)
-
- mock_cmd.assert_called_once_with(
- expected_cmd, cwd=chroot_path, encoding='utf-8')
-
- mock_add_links_to_cl.assert_called_once()
-
- @mock.patch.object(update_packages_and_run_tests, 'AddLinksToCL')
- @mock.patch.object(subprocess, 'check_output')
- def testSuccessfullySubmittedRecipeBuilders(self, mock_cmd,
- mock_add_links_to_cl):
-
- expected_cmd = [
- 'bb', 'add', '-json', '-cl',
- 'crrev.com/c/%s' % 900, '-cl',
- 'crrev.com/c/%s' % 1200, 'some_option', 'builder1'
- ]
-
- bb_id = '1234'
- create_time = '2020-04-18T00:03:53.978767Z'
-
- mock_cmd.return_value = json.dumps({'id': bb_id, 'createTime': create_time})
-
- chroot_path = '/some/path/to/chroot'
- cl = 900
- extra_cls = [1200]
- options = ['some_option']
- builders = ['builder1']
-
- tests = update_packages_and_run_tests.StartRecipeBuilders(
- cl, extra_cls, options, builders, chroot_path)
-
- expected_tests = [{
- 'launch_time': create_time,
- 'link': 'http://ci.chromium.org/b/%s' % bb_id,
- 'buildbucket_id': bb_id,
- 'extra_cls': extra_cls,
- 'options': options,
- 'builder': builders
- }]
-
- self.assertEqual(tests, expected_tests)
-
- mock_cmd.assert_called_once_with(
- expected_cmd, cwd=chroot_path, encoding='utf-8')
-
- mock_add_links_to_cl.assert_called_once()
-
- @mock.patch.object(subprocess, 'check_output', return_value=None)
- def testSuccessfullyAddedTestLinkToCL(self, mock_exec_cmd):
- chroot_path = '/abs/path/to/chroot'
-
- test_cl_number = 1000
-
- tests = [{'link': 'https://some_tryjob_link.com'}]
-
- update_packages_and_run_tests.AddLinksToCL(tests, test_cl_number,
- chroot_path)
-
- expected_gerrit_message = [
- '%s/chromite/bin/gerrit' % chroot_path, 'message',
- str(test_cl_number),
- 'Started the following tests:\n%s' % tests[0]['link']
- ]
+ """Unittests when running tryjobs after updating packages."""
+
+ def testNoLastTestedFile(self):
+ self.assertEqual(
+ update_packages_and_run_tests.UnchangedSinceLastRun(None, {}), False
+ )
+
+ def testEmptyLastTestedFile(self):
+ with test_helpers.CreateTemporaryFile() as temp_file:
+ self.assertEqual(
+ update_packages_and_run_tests.UnchangedSinceLastRun(
+ temp_file, {}
+ ),
+ False,
+ )
+
+ def testLastTestedFileDoesNotExist(self):
+        # Simulate 'open()' on a last tested file that does not exist.
+ mock.mock_open(read_data="")
+
+ self.assertEqual(
+ update_packages_and_run_tests.UnchangedSinceLastRun(
+ "/some/file/that/does/not/exist.txt", {}
+ ),
+ False,
+ )
+
+ def testMatchedLastTestedFile(self):
+ with test_helpers.CreateTemporaryFile() as last_tested_file:
+ arg_dict = {
+ "svn_version": 1234,
+ "ebuilds": [
+ "/path/to/package1-r2.ebuild",
+ "/path/to/package2/package2-r3.ebuild",
+ ],
+ "builders": [
+ "kevin-llvm-next-toolchain-tryjob",
+ "eve-llvm-next-toolchain-tryjob",
+ ],
+ "extra_cls": [10, 1],
+ "tryjob_options": ["latest-toolchain", "hwtest"],
+ }
+
+ with open(last_tested_file, "w") as f:
+ f.write(json.dumps(arg_dict, indent=2))
+
+ self.assertEqual(
+ update_packages_and_run_tests.UnchangedSinceLastRun(
+ last_tested_file, arg_dict
+ ),
+ True,
+ )
+
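The cases above pin down UnchangedSinceLastRun's contract: a None path, a missing file, or an empty file all report False, and an exact JSON match reports True. A minimal sketch consistent with that contract (illustrative only, not the module's implementation):

    import json


    def unchanged_since_last_run(last_tested_file, arg_dict):
        """True iff arg_dict equals the JSON recorded by the previous run."""
        if not last_tested_file:
            return False
        try:
            with open(last_tested_file) as f:
                return json.load(f) == arg_dict
        except (OSError, json.JSONDecodeError):
            # A missing, unreadable, or empty file means "not tested yet".
            return False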
+ def testGetTryJobCommandWithNoExtraInformation(self):
+ change_list = 1234
+
+ builder = "nocturne"
+
+ expected_cmd = [
+ "cros",
+ "tryjob",
+ "--yes",
+ "--json",
+ "-g",
+ "%d" % change_list,
+ builder,
+ ]
+
+ self.assertEqual(
+ update_packages_and_run_tests.GetTryJobCommand(
+ change_list, None, None, builder
+ ),
+ expected_cmd,
+ )
+
+ def testGetTryJobCommandWithExtraInformation(self):
+ change_list = 4321
+ extra_cls = [1000, 10]
+ options = ["option1", "option2"]
+ builder = "kevin"
+
+ expected_cmd = [
+ "cros",
+ "tryjob",
+ "--yes",
+ "--json",
+ "-g",
+ "%d" % change_list,
+ "-g",
+ "%d" % extra_cls[0],
+ "-g",
+ "%d" % extra_cls[1],
+ "--%s" % options[0],
+ "--%s" % options[1],
+ builder,
+ ]
+
+ self.assertEqual(
+ update_packages_and_run_tests.GetTryJobCommand(
+ change_list, extra_cls, options, builder
+ ),
+ expected_cmd,
+ )
+
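Both command tests expect the same shape: the base `cros tryjob --yes --json` invocation, one `-g` flag per changelist, `--`-prefixed options, and the builder last. A sketch that produces it (illustrative, not the module's code):

    def get_try_job_command(change_list, extra_change_lists, options, builder):
        """Builds the `cros tryjob` command the two tests above expect."""
        cmd = ["cros", "tryjob", "--yes", "--json", "-g", "%d" % change_list]
        for cl in extra_change_lists or []:
            cmd += ["-g", "%d" % cl]
        for opt in options or []:
            cmd.append("--%s" % opt)
        cmd.append(builder)
        return cmd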
+ @mock.patch.object(
+ update_packages_and_run_tests,
+ "GetCurrentTimeInUTC",
+ return_value="2019-09-09",
+ )
+ @mock.patch.object(update_packages_and_run_tests, "AddLinksToCL")
+ @mock.patch.object(subprocess, "check_output")
+ def testSuccessfullySubmittedTryJob(
+ self, mock_cmd, mock_add_links_to_cl, mock_launch_time
+ ):
+
+ expected_cmd = [
+ "cros",
+ "tryjob",
+ "--yes",
+ "--json",
+ "-g",
+ "%d" % 900,
+ "-g",
+ "%d" % 1200,
+ "--some_option",
+ "builder1",
+ ]
+
+ bb_id = "1234"
+ url = "http://ci.chromium.org/b/%s" % bb_id
+
+ mock_cmd.return_value = json.dumps([{"id": bb_id, "url": url}])
+
+ chroot_path = "/some/path/to/chroot"
+ cl = 900
+ extra_cls = [1200]
+ options = ["some_option"]
+ builders = ["builder1"]
+
+ tests = update_packages_and_run_tests.RunTryJobs(
+ cl, extra_cls, options, builders, chroot_path
+ )
+
+ expected_tests = [
+ {
+ "launch_time": mock_launch_time.return_value,
+ "link": url,
+ "buildbucket_id": int(bb_id),
+ "extra_cls": extra_cls,
+ "options": options,
+ "builder": builders,
+ }
+ ]
+
+ self.assertEqual(tests, expected_tests)
+
+ mock_cmd.assert_called_once_with(
+ expected_cmd, cwd=chroot_path, encoding="utf-8"
+ )
+
+ mock_add_links_to_cl.assert_called_once()
+
+ @mock.patch.object(update_packages_and_run_tests, "AddLinksToCL")
+ @mock.patch.object(subprocess, "check_output")
+ def testSuccessfullySubmittedRecipeBuilders(
+ self, mock_cmd, mock_add_links_to_cl
+ ):
+
+ expected_cmd = [
+ "bb",
+ "add",
+ "-json",
+ "-cl",
+ "crrev.com/c/%s" % 900,
+ "-cl",
+ "crrev.com/c/%s" % 1200,
+ "some_option",
+ "builder1",
+ ]
+
+ bb_id = "1234"
+ create_time = "2020-04-18T00:03:53.978767Z"
+
+ mock_cmd.return_value = json.dumps(
+ {"id": bb_id, "createTime": create_time}
+ )
+
+ chroot_path = "/some/path/to/chroot"
+ cl = 900
+ extra_cls = [1200]
+ options = ["some_option"]
+ builders = ["builder1"]
+
+ tests = update_packages_and_run_tests.StartRecipeBuilders(
+ cl, extra_cls, options, builders, chroot_path
+ )
+
+ expected_tests = [
+ {
+ "launch_time": create_time,
+ "link": "http://ci.chromium.org/b/%s" % bb_id,
+ "buildbucket_id": bb_id,
+ "extra_cls": extra_cls,
+ "options": options,
+ "builder": builders,
+ }
+ ]
+
+ self.assertEqual(tests, expected_tests)
+
+ mock_cmd.assert_called_once_with(
+ expected_cmd, cwd=chroot_path, encoding="utf-8"
+ )
+
+ mock_add_links_to_cl.assert_called_once()
+
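This test pins StartRecipeBuilders for the single-call case: one `bb add -json` invocation with a `-cl crrev.com/c/<n>` flag per changelist, bare options, and a JSON object whose `id` and `createTime` feed the returned record. A sketch consistent with that (an assumption; the real function may launch each builder separately):

    import json
    import subprocess


    def start_recipe_builders(cl, extra_cls, options, builders, chroot_path):
        """Launches recipe builders via `bb add` and records the result."""
        cmd = ["bb", "add", "-json", "-cl", "crrev.com/c/%s" % cl]
        for extra in extra_cls or []:
            cmd += ["-cl", "crrev.com/c/%s" % extra]
        cmd += list(options or []) + list(builders)
        out = json.loads(
            subprocess.check_output(cmd, cwd=chroot_path, encoding="utf-8")
        )
        return [
            {
                "launch_time": out["createTime"],
                "link": "http://ci.chromium.org/b/%s" % out["id"],
                "buildbucket_id": out["id"],
                "extra_cls": extra_cls,
                "options": options,
                "builder": builders,
            }
        ]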
+ @mock.patch.object(subprocess, "check_output", return_value=None)
+ def testSuccessfullyAddedTestLinkToCL(self, mock_exec_cmd):
+ chroot_path = "/abs/path/to/chroot"
+
+ test_cl_number = 1000
+
+ tests = [{"link": "https://some_tryjob_link.com"}]
+
+ update_packages_and_run_tests.AddLinksToCL(
+ tests, test_cl_number, chroot_path
+ )
+
+ expected_gerrit_message = [
+ "%s/chromite/bin/gerrit" % chroot_path,
+ "message",
+ str(test_cl_number),
+ "Started the following tests:\n%s" % tests[0]["link"],
+ ]
+
+ mock_exec_cmd.assert_called_once_with(expected_gerrit_message)
+
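The expected gerrit message above implies AddLinksToCL shells out to the chroot's gerrit helper with the launched links. A sketch consistent with this test (newline-joining of multiple links is an assumption):

    import subprocess


    def add_links_to_cl(tests, cl_number, chroot_path):
        """Posts the launched-test links as a Gerrit message on the CL."""
        links = "\n".join(test["link"] for test in tests)
        subprocess.check_output(
            [
                "%s/chromite/bin/gerrit" % chroot_path,
                "message",
                str(cl_number),
                "Started the following tests:\n%s" % links,
            ]
        )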
+ @mock.patch.object(update_packages_and_run_tests, "RunTryJobs")
+ @mock.patch.object(update_chromeos_llvm_hash, "UpdatePackages")
+ @mock.patch.object(update_packages_and_run_tests, "GetCommandLineArgs")
+ @mock.patch.object(get_llvm_hash, "GetLLVMHashAndVersionFromSVNOption")
+ @mock.patch.object(chroot, "VerifyOutsideChroot", return_value=True)
+ @mock.patch.object(chroot, "GetChrootEbuildPaths")
+ def testUpdatedLastTestedFileWithNewTestedRevision(
+ self,
+ mock_get_chroot_build_paths,
+ mock_outside_chroot,
+ mock_get_hash_and_version,
+ mock_get_commandline_args,
+ mock_update_packages,
+ mock_run_tryjobs,
+ ):
+
+ # Create a temporary file to simulate the last tested file that contains a
+ # revision.
+ with test_helpers.CreateTemporaryFile() as last_tested_file:
+ builders = [
+ "kevin-llvm-next-toolchain-tryjob",
+ "eve-llvm-next-toolchain-tryjob",
+ ]
+ extra_cls = [10, 1]
+ tryjob_options = ["latest-toolchain", "hwtest"]
+ ebuilds = [
+ "/path/to/package1/package1-r2.ebuild",
+ "/path/to/package2/package2-r3.ebuild",
+ ]
+
+ arg_dict = {
+ "svn_version": 100,
+ "ebuilds": ebuilds,
+ "builders": builders,
+ "extra_cls": extra_cls,
+ "tryjob_options": tryjob_options,
+ }
+            # Prepare the last tested file.
+ with open(last_tested_file, "w") as f:
+ json.dump(arg_dict, f, indent=2)
+
+ # Call with a changed LLVM svn version
+ args_output = test_helpers.ArgsOutputTest()
+ args_output.is_llvm_next = True
+ args_output.extra_change_lists = extra_cls
+ args_output.last_tested = last_tested_file
+ args_output.reviewers = []
+
+ args_output.subparser_name = "tryjobs"
+ args_output.builders = builders
+ args_output.options = tryjob_options
+
+ mock_get_commandline_args.return_value = args_output
+
+ mock_get_chroot_build_paths.return_value = ebuilds
+
+ mock_get_hash_and_version.return_value = ("a123testhash2", 200)
+
+ mock_update_packages.return_value = git.CommitContents(
+ url="https://some_cl_url.com", cl_number=12345
+ )
+
+ mock_run_tryjobs.return_value = [
+ {"link": "https://some_tryjob_url.com", "buildbucket_id": 1234}
+ ]
+
+ update_packages_and_run_tests.main()
+        # Verify that the last tested file has been updated to the new LLVM
+        # revision.
+        with open(last_tested_file) as f:
+            arg_dict = json.load(f)
+
+        self.assertEqual(arg_dict["svn_version"], 200)
+
+        mock_outside_chroot.assert_called_once()
+
+        mock_get_commandline_args.assert_called_once()
+
+        mock_get_hash_and_version.assert_called_once()
+
+        mock_run_tryjobs.assert_called_once()
+
+        mock_update_packages.assert_called_once()
-    mock_exec_cmd.assert_called_once_with(expected_gerrit_message)
-
-  @mock.patch.object(update_packages_and_run_tests, 'RunTryJobs')
-  @mock.patch.object(update_chromeos_llvm_hash, 'UpdatePackages')
-  @mock.patch.object(update_packages_and_run_tests, 'GetCommandLineArgs')
-  @mock.patch.object(get_llvm_hash, 'GetLLVMHashAndVersionFromSVNOption')
-  @mock.patch.object(chroot, 'VerifyOutsideChroot', return_value=True)
-  @mock.patch.object(chroot, 'GetChrootEbuildPaths')
-  def testUpdatedLastTestedFileWithNewTestedRevision(
-      self, mock_get_chroot_build_paths, mock_outside_chroot,
-      mock_get_hash_and_version, mock_get_commandline_args,
-      mock_update_packages, mock_run_tryjobs):
-
-    # Create a temporary file to simulate the last tested file that contains a
-    # revision.
-    with test_helpers.CreateTemporaryFile() as last_tested_file:
-      builders = [
-          'kevin-llvm-next-toolchain-tryjob', 'eve-llvm-next-toolchain-tryjob'
-      ]
-      extra_cls = [10, 1]
-      tryjob_options = ['latest-toolchain', 'hwtest']
-      ebuilds = [
-          '/path/to/package1/package1-r2.ebuild',
-          '/path/to/package2/package2-r3.ebuild'
-      ]
-
-      arg_dict = {
-          'svn_version': 100,
-          'ebuilds': ebuilds,
-          'builders': builders,
-          'extra_cls': extra_cls,
-          'tryjob_options': tryjob_options
-      }
-      # Parepared last tested file
-      with open(last_tested_file, 'w') as f:
-        json.dump(arg_dict, f, indent=2)
-
-      # Call with a changed LLVM svn version
-      args_output = test_helpers.ArgsOutputTest()
-      args_output.is_llvm_next = True
-      args_output.extra_change_lists = extra_cls
-      args_output.last_tested = last_tested_file
-      args_output.reviewers = []
-
-      args_output.subparser_name = 'tryjobs'
-      args_output.builders = builders
-      args_output.options = tryjob_options
-
-      mock_get_commandline_args.return_value = args_output
-
-      mock_get_chroot_build_paths.return_value = ebuilds
-
-      mock_get_hash_and_version.return_value = ('a123testhash2', 200)
-
-      mock_update_packages.return_value = git.CommitContents(
-          url='https://some_cl_url.com', cl_number=12345)
-
-      mock_run_tryjobs.return_value = [{
-          'link': 'https://some_tryjob_url.com',
-          'buildbucket_id': 1234
-      }]
-
-      update_packages_and_run_tests.main()
-
-      # Verify that the lasted tested file has been updated to the new LLVM
-      # revision.
-      with open(last_tested_file) as f:
-        arg_dict = json.load(f)
-
-      self.assertEqual(arg_dict['svn_version'], 200)
-
-      mock_outside_chroot.assert_called_once()
-
-      mock_get_commandline_args.assert_called_once()
-
-      mock_get_hash_and_version.assert_called_once()
-
-      mock_run_tryjobs.assert_called_once()
-
-      mock_update_packages.assert_called_once()
class UpdatePackagesAndRunTestCQTest(unittest.TestCase):
- """Unittests for CQ dry run after updating packages."""
-
- def testGetCQDependString(self):
- test_no_changelists = []
- test_single_changelist = [1234]
- test_multiple_changelists = [1234, 5678]
-
- self.assertIsNone(
- update_packages_and_run_tests.GetCQDependString(test_no_changelists))
-
- self.assertEqual(
- update_packages_and_run_tests.GetCQDependString(test_single_changelist),
- '\nCq-Depend: chromium:1234')
-
- self.assertEqual(
- update_packages_and_run_tests.GetCQDependString(
- test_multiple_changelists),
- '\nCq-Depend: chromium:1234, chromium:5678')
-
- def testGetCQIncludeTrybotsString(self):
- test_no_trybot = None
- test_valid_trybot = 'llvm-next'
- test_invalid_trybot = 'invalid-name'
-
- self.assertIsNone(
- update_packages_and_run_tests.GetCQIncludeTrybotsString(test_no_trybot))
-
- self.assertEqual(
- update_packages_and_run_tests.GetCQIncludeTrybotsString(
- test_valid_trybot),
- '\nCq-Include-Trybots:chromeos/cq:cq-llvm-next-orchestrator')
-
- with self.assertRaises(ValueError) as context:
- update_packages_and_run_tests.GetCQIncludeTrybotsString(
- test_invalid_trybot)
-
- self.assertIn('is not a valid llvm trybot', str(context.exception))
-
- @mock.patch.object(subprocess, 'check_output', return_value=None)
- def testStartCQDryRunNoDeps(self, mock_exec_cmd):
- chroot_path = '/abs/path/to/chroot'
- test_cl_number = 1000
-
- # test with no deps cls.
- extra_cls = []
- update_packages_and_run_tests.StartCQDryRun(test_cl_number, extra_cls,
- chroot_path)
-
- expected_gerrit_message = [
- '%s/chromite/bin/gerrit' % chroot_path, 'label-cq',
- str(test_cl_number), '1'
- ]
-
- mock_exec_cmd.assert_called_once_with(expected_gerrit_message)
-
- # Mock ExecCommandAndCaptureOutput for the gerrit command execution.
- @mock.patch.object(subprocess, 'check_output', return_value=None)
- # test with a single deps cl.
- def testStartCQDryRunSingleDep(self, mock_exec_cmd):
- chroot_path = '/abs/path/to/chroot'
- test_cl_number = 1000
-
- extra_cls = [2000]
- update_packages_and_run_tests.StartCQDryRun(test_cl_number, extra_cls,
- chroot_path)
-
- expected_gerrit_cmd_1 = [
- '%s/chromite/bin/gerrit' % chroot_path, 'label-cq',
- str(test_cl_number), '1'
- ]
- expected_gerrit_cmd_2 = [
- '%s/chromite/bin/gerrit' % chroot_path, 'label-cq',
- str(2000), '1'
- ]
-
- self.assertEqual(mock_exec_cmd.call_count, 2)
- self.assertEqual(mock_exec_cmd.call_args_list[0],
- mock.call(expected_gerrit_cmd_1))
- self.assertEqual(mock_exec_cmd.call_args_list[1],
- mock.call(expected_gerrit_cmd_2))
-
- # Mock ExecCommandAndCaptureOutput for the gerrit command execution.
- @mock.patch.object(subprocess, 'check_output', return_value=None)
- def testStartCQDryRunMultipleDep(self, mock_exec_cmd):
- chroot_path = '/abs/path/to/chroot'
- test_cl_number = 1000
-
- # test with multiple deps cls.
- extra_cls = [3000, 4000]
- update_packages_and_run_tests.StartCQDryRun(test_cl_number, extra_cls,
- chroot_path)
-
- expected_gerrit_cmd_1 = [
- '%s/chromite/bin/gerrit' % chroot_path, 'label-cq',
- str(test_cl_number), '1'
- ]
- expected_gerrit_cmd_2 = [
- '%s/chromite/bin/gerrit' % chroot_path, 'label-cq',
- str(3000), '1'
- ]
- expected_gerrit_cmd_3 = [
- '%s/chromite/bin/gerrit' % chroot_path, 'label-cq',
- str(4000), '1'
- ]
-
- self.assertEqual(mock_exec_cmd.call_count, 3)
- self.assertEqual(mock_exec_cmd.call_args_list[0],
- mock.call(expected_gerrit_cmd_1))
- self.assertEqual(mock_exec_cmd.call_args_list[1],
- mock.call(expected_gerrit_cmd_2))
- self.assertEqual(mock_exec_cmd.call_args_list[2],
- mock.call(expected_gerrit_cmd_3))
-
- # Mock ExecCommandAndCaptureOutput for the gerrit command execution.
- @mock.patch.object(subprocess, 'check_output', return_value=None)
- # test with no reviewers.
- def testAddReviewersNone(self, mock_exec_cmd):
- chroot_path = '/abs/path/to/chroot'
- reviewers = []
- test_cl_number = 1000
-
- update_packages_and_run_tests.AddReviewers(test_cl_number, reviewers,
- chroot_path)
- self.assertTrue(mock_exec_cmd.not_called)
-
- # Mock ExecCommandAndCaptureOutput for the gerrit command execution.
- @mock.patch.object(subprocess, 'check_output', return_value=None)
- # test with multiple reviewers.
- def testAddReviewersMultiple(self, mock_exec_cmd):
- chroot_path = '/abs/path/to/chroot'
- reviewers = ['none1@chromium.org', 'none2@chromium.org']
- test_cl_number = 1000
-
- update_packages_and_run_tests.AddReviewers(test_cl_number, reviewers,
- chroot_path)
-
- expected_gerrit_cmd_1 = [
- '%s/chromite/bin/gerrit' % chroot_path, 'reviewers',
- str(test_cl_number), 'none1@chromium.org'
- ]
- expected_gerrit_cmd_2 = [
- '%s/chromite/bin/gerrit' % chroot_path, 'reviewers',
- str(test_cl_number), 'none2@chromium.org'
- ]
-
- self.assertEqual(mock_exec_cmd.call_count, 2)
- self.assertEqual(mock_exec_cmd.call_args_list[0],
- mock.call(expected_gerrit_cmd_1))
- self.assertEqual(mock_exec_cmd.call_args_list[1],
- mock.call(expected_gerrit_cmd_2))
-
-
-if __name__ == '__main__':
- unittest.main()
+ """Unittests for CQ dry run after updating packages."""
+
+ def testGetCQDependString(self):
+ test_no_changelists = []
+ test_single_changelist = [1234]
+ test_multiple_changelists = [1234, 5678]
+
+ self.assertIsNone(
+ update_packages_and_run_tests.GetCQDependString(test_no_changelists)
+ )
+
+ self.assertEqual(
+ update_packages_and_run_tests.GetCQDependString(
+ test_single_changelist
+ ),
+ "\nCq-Depend: chromium:1234",
+ )
+
+ self.assertEqual(
+ update_packages_and_run_tests.GetCQDependString(
+ test_multiple_changelists
+ ),
+ "\nCq-Depend: chromium:1234, chromium:5678",
+ )
+
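The assertions above fully determine the footer format; an equivalent sketch (illustrative only):

    def get_cq_depend_string(changelists):
        """Formats a Cq-Depend footer, or None when there are no extra CLs."""
        if not changelists:
            return None
        return "\nCq-Depend: " + ", ".join(
            "chromium:%d" % cl for cl in changelists
        )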
+ def testGetCQIncludeTrybotsString(self):
+ test_no_trybot = None
+ test_valid_trybot = "llvm-next"
+ test_invalid_trybot = "invalid-name"
+
+ self.assertIsNone(
+ update_packages_and_run_tests.GetCQIncludeTrybotsString(
+ test_no_trybot
+ )
+ )
+
+ self.assertEqual(
+ update_packages_and_run_tests.GetCQIncludeTrybotsString(
+ test_valid_trybot
+ ),
+ "\nCq-Include-Trybots:chromeos/cq:cq-llvm-next-orchestrator",
+ )
+
+ with self.assertRaises(ValueError) as context:
+ update_packages_and_run_tests.GetCQIncludeTrybotsString(
+ test_invalid_trybot
+ )
+
+ self.assertIn("is not a valid llvm trybot", str(context.exception))
+
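These cases imply a small lookup from trybot name to Cq-Include-Trybots footer. A sketch consistent with them (the mapping shown is a hypothetical subset):

    # Hypothetical table; the module's real mapping may list more trybots.
    VALID_CQ_TRYBOTS = {
        "llvm-next": "\nCq-Include-Trybots:chromeos/cq:cq-llvm-next-orchestrator",
    }


    def get_cq_include_trybots_string(trybot):
        """Formats a Cq-Include-Trybots footer for a known LLVM trybot."""
        if not trybot:
            return None
        if trybot not in VALID_CQ_TRYBOTS:
            raise ValueError("%s is not a valid llvm trybot" % trybot)
        return VALID_CQ_TRYBOTS[trybot]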
+ @mock.patch.object(subprocess, "check_output", return_value=None)
+ def testStartCQDryRunNoDeps(self, mock_exec_cmd):
+ chroot_path = "/abs/path/to/chroot"
+ test_cl_number = 1000
+
+ # test with no deps cls.
+ extra_cls = []
+ update_packages_and_run_tests.StartCQDryRun(
+ test_cl_number, extra_cls, chroot_path
+ )
+
+ expected_gerrit_message = [
+ "%s/chromite/bin/gerrit" % chroot_path,
+ "label-cq",
+ str(test_cl_number),
+ "1",
+ ]
+
+ mock_exec_cmd.assert_called_once_with(expected_gerrit_message)
+
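The dry-run tests here and below pin StartCQDryRun to one `gerrit label-cq <cl> 1` call for the CL under test plus one per extra CL. A sketch matching that contract (illustrative, not the module's code):

    import subprocess


    def start_cq_dry_run(cl_number, extra_cls, chroot_path):
        """Applies Commit-Queue +1 (dry run) to the CL and its dependencies."""
        gerrit = "%s/chromite/bin/gerrit" % chroot_path
        for cl in [cl_number] + list(extra_cls):
            subprocess.check_output([gerrit, "label-cq", str(cl), "1"])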
+ # Mock ExecCommandAndCaptureOutput for the gerrit command execution.
+ @mock.patch.object(subprocess, "check_output", return_value=None)
+ # test with a single deps cl.
+ def testStartCQDryRunSingleDep(self, mock_exec_cmd):
+ chroot_path = "/abs/path/to/chroot"
+ test_cl_number = 1000
+
+ extra_cls = [2000]
+ update_packages_and_run_tests.StartCQDryRun(
+ test_cl_number, extra_cls, chroot_path
+ )
+
+ expected_gerrit_cmd_1 = [
+ "%s/chromite/bin/gerrit" % chroot_path,
+ "label-cq",
+ str(test_cl_number),
+ "1",
+ ]
+ expected_gerrit_cmd_2 = [
+ "%s/chromite/bin/gerrit" % chroot_path,
+ "label-cq",
+ str(2000),
+ "1",
+ ]
+
+ self.assertEqual(mock_exec_cmd.call_count, 2)
+ self.assertEqual(
+ mock_exec_cmd.call_args_list[0], mock.call(expected_gerrit_cmd_1)
+ )
+ self.assertEqual(
+ mock_exec_cmd.call_args_list[1], mock.call(expected_gerrit_cmd_2)
+ )
+
+ # Mock ExecCommandAndCaptureOutput for the gerrit command execution.
+ @mock.patch.object(subprocess, "check_output", return_value=None)
+ def testStartCQDryRunMultipleDep(self, mock_exec_cmd):
+ chroot_path = "/abs/path/to/chroot"
+ test_cl_number = 1000
+
+ # test with multiple deps cls.
+ extra_cls = [3000, 4000]
+ update_packages_and_run_tests.StartCQDryRun(
+ test_cl_number, extra_cls, chroot_path
+ )
+
+ expected_gerrit_cmd_1 = [
+ "%s/chromite/bin/gerrit" % chroot_path,
+ "label-cq",
+ str(test_cl_number),
+ "1",
+ ]
+ expected_gerrit_cmd_2 = [
+ "%s/chromite/bin/gerrit" % chroot_path,
+ "label-cq",
+ str(3000),
+ "1",
+ ]
+ expected_gerrit_cmd_3 = [
+ "%s/chromite/bin/gerrit" % chroot_path,
+ "label-cq",
+ str(4000),
+ "1",
+ ]
+
+ self.assertEqual(mock_exec_cmd.call_count, 3)
+ self.assertEqual(
+ mock_exec_cmd.call_args_list[0], mock.call(expected_gerrit_cmd_1)
+ )
+ self.assertEqual(
+ mock_exec_cmd.call_args_list[1], mock.call(expected_gerrit_cmd_2)
+ )
+ self.assertEqual(
+ mock_exec_cmd.call_args_list[2], mock.call(expected_gerrit_cmd_3)
+ )
+
+ # Mock ExecCommandAndCaptureOutput for the gerrit command execution.
+ @mock.patch.object(subprocess, "check_output", return_value=None)
+ # test with no reviewers.
+ def testAddReviewersNone(self, mock_exec_cmd):
+ chroot_path = "/abs/path/to/chroot"
+ reviewers = []
+ test_cl_number = 1000
+
+ update_packages_and_run_tests.AddReviewers(
+ test_cl_number, reviewers, chroot_path
+ )
+        mock_exec_cmd.assert_not_called()
+
+ # Mock ExecCommandAndCaptureOutput for the gerrit command execution.
+ @mock.patch.object(subprocess, "check_output", return_value=None)
+ # test with multiple reviewers.
+ def testAddReviewersMultiple(self, mock_exec_cmd):
+ chroot_path = "/abs/path/to/chroot"
+ reviewers = ["none1@chromium.org", "none2@chromium.org"]
+ test_cl_number = 1000
+
+ update_packages_and_run_tests.AddReviewers(
+ test_cl_number, reviewers, chroot_path
+ )
+
+ expected_gerrit_cmd_1 = [
+ "%s/chromite/bin/gerrit" % chroot_path,
+ "reviewers",
+ str(test_cl_number),
+ "none1@chromium.org",
+ ]
+ expected_gerrit_cmd_2 = [
+ "%s/chromite/bin/gerrit" % chroot_path,
+ "reviewers",
+ str(test_cl_number),
+ "none2@chromium.org",
+ ]
+
+ self.assertEqual(mock_exec_cmd.call_count, 2)
+ self.assertEqual(
+ mock_exec_cmd.call_args_list[0], mock.call(expected_gerrit_cmd_1)
+ )
+ self.assertEqual(
+ mock_exec_cmd.call_args_list[1], mock.call(expected_gerrit_cmd_2)
+ )
+
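Likewise, the reviewer tests imply one `gerrit reviewers <cl> <email>` call per reviewer and none for an empty list; a matching sketch (illustrative):

    import subprocess


    def add_reviewers(cl_number, reviewers, chroot_path):
        """Adds each reviewer to the CL via the chroot's gerrit helper."""
        gerrit = "%s/chromite/bin/gerrit" % chroot_path
        for reviewer in reviewers:
            subprocess.check_output(
                [gerrit, "reviewers", str(cl_number), reviewer]
            )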
+
+if __name__ == "__main__":
+ unittest.main()
diff --git a/llvm_tools/update_tryjob_status.py b/llvm_tools/update_tryjob_status.py
index f25fadca..49c48658 100755
--- a/llvm_tools/update_tryjob_status.py
+++ b/llvm_tools/update_tryjob_status.py
@@ -1,12 +1,11 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
-# Copyright 2019 The Chromium OS Authors. All rights reserved.
+# Copyright 2019 The ChromiumOS Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Updates the status of a tryjob."""
-from __future__ import print_function
import argparse
import enum
@@ -20,245 +19,291 @@ from test_helpers import CreateTemporaryJsonFile
class TryjobStatus(enum.Enum):
- """Values for the 'status' field of a tryjob."""
+ """Values for the 'status' field of a tryjob."""
- GOOD = 'good'
- BAD = 'bad'
- PENDING = 'pending'
- SKIP = 'skip'
+ GOOD = "good"
+ BAD = "bad"
+ PENDING = "pending"
+ SKIP = "skip"
- # Executes the script passed into the command line (this script's exit code
- # determines the 'status' value of the tryjob).
- CUSTOM_SCRIPT = 'custom_script'
+ # Executes the script passed into the command line (this script's exit code
+ # determines the 'status' value of the tryjob).
+ CUSTOM_SCRIPT = "custom_script"
class CustomScriptStatus(enum.Enum):
- """Exit code values of a custom script."""
+ """Exit code values of a custom script."""
- # NOTE: Not using 1 for 'bad' because the custom script can raise an
- # exception which would cause the exit code of the script to be 1, so the
- # tryjob's 'status' would be updated when there is an exception.
- #
- # Exit codes are as follows:
- # 0: 'good'
- # 124: 'bad'
- # 125: 'skip'
- GOOD = 0
- BAD = 124
- SKIP = 125
+ # NOTE: Not using 1 for 'bad' because the custom script can raise an
+ # exception which would cause the exit code of the script to be 1, so the
+ # tryjob's 'status' would be updated when there is an exception.
+ #
+ # Exit codes are as follows:
+ # 0: 'good'
+ # 124: 'bad'
+ # 125: 'skip'
+ GOOD = 0
+ BAD = 124
+ SKIP = 125
custom_script_exit_value_mapping = {
CustomScriptStatus.GOOD.value: TryjobStatus.GOOD.value,
CustomScriptStatus.BAD.value: TryjobStatus.BAD.value,
- CustomScriptStatus.SKIP.value: TryjobStatus.SKIP.value
+ CustomScriptStatus.SKIP.value: TryjobStatus.SKIP.value,
}
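To make the exit-code protocol concrete: GetCustomScriptResult (below) invokes the custom script with the tryjob's JSON as its first argument, so a conforming script could look like this sketch (the status-based policy is hypothetical):

    #!/usr/bin/env python3
    # Sketch of a conforming custom script: exit 0 for 'good', 124 for
    # 'bad', 125 for 'skip'.
    import json
    import sys


    def main():
        with open(sys.argv[1]) as f:
            tryjob = json.load(f)
        # Hypothetical policy: skip unfinished tryjobs, otherwise report
        # the recorded status.
        if tryjob.get("status") == "pending":
            sys.exit(125)  # 'skip'
        sys.exit(0 if tryjob.get("status") == "good" else 124)


    if __name__ == "__main__":
        main()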
def GetCommandLineArgs():
- """Parses the command line for the command line arguments."""
-
- # Default absoute path to the chroot if not specified.
- cros_root = os.path.expanduser('~')
- cros_root = os.path.join(cros_root, 'chromiumos')
-
- # Create parser and add optional command-line arguments.
- parser = argparse.ArgumentParser(
- description='Updates the status of a tryjob.')
-
- # Add argument for the JSON file to use for the update of a tryjob.
- parser.add_argument(
- '--status_file',
- required=True,
- help='The absolute path to the JSON file that contains the tryjobs used '
- 'for bisecting LLVM.')
-
- # Add argument that sets the 'status' field to that value.
- parser.add_argument(
- '--set_status',
- required=True,
- choices=[tryjob_status.value for tryjob_status in TryjobStatus],
- help='Sets the "status" field of the tryjob.')
-
- # Add argument that determines which revision to search for in the list of
- # tryjobs.
- parser.add_argument(
- '--revision',
- required=True,
- type=int,
- help='The revision to set its status.')
-
- # Add argument for the custom script to execute for the 'custom_script'
- # option in '--set_status'.
- parser.add_argument(
- '--custom_script',
- help='The absolute path to the custom script to execute (its exit code '
- 'should be %d for "good", %d for "bad", or %d for "skip")' %
- (CustomScriptStatus.GOOD.value, CustomScriptStatus.BAD.value,
- CustomScriptStatus.SKIP.value))
-
- args_output = parser.parse_args()
-
- if not (os.path.isfile(
- args_output.status_file and
- not args_output.status_file.endswith('.json'))):
- raise ValueError('File does not exist or does not ending in ".json" '
- ': %s' % args_output.status_file)
-
- if (args_output.set_status == TryjobStatus.CUSTOM_SCRIPT.value and
- not args_output.custom_script):
- raise ValueError('Please provide the absolute path to the script to '
- 'execute.')
-
- return args_output
+ """Parses the command line for the command line arguments."""
+
+    # Default absolute path to the chroot if not specified.
+ cros_root = os.path.expanduser("~")
+ cros_root = os.path.join(cros_root, "chromiumos")
+
+ # Create parser and add optional command-line arguments.
+ parser = argparse.ArgumentParser(
+ description="Updates the status of a tryjob."
+ )
+
+ # Add argument for the JSON file to use for the update of a tryjob.
+ parser.add_argument(
+ "--status_file",
+ required=True,
+ help="The absolute path to the JSON file that contains the tryjobs used "
+ "for bisecting LLVM.",
+ )
+
+ # Add argument that sets the 'status' field to that value.
+ parser.add_argument(
+ "--set_status",
+ required=True,
+ choices=[tryjob_status.value for tryjob_status in TryjobStatus],
+ help='Sets the "status" field of the tryjob.',
+ )
+
+ # Add argument that determines which revision to search for in the list of
+ # tryjobs.
+ parser.add_argument(
+ "--revision",
+ required=True,
+ type=int,
+ help="The revision to set its status.",
+ )
+
+ # Add argument for the custom script to execute for the 'custom_script'
+ # option in '--set_status'.
+ parser.add_argument(
+ "--custom_script",
+ help="The absolute path to the custom script to execute (its exit code "
+ 'should be %d for "good", %d for "bad", or %d for "skip")'
+ % (
+ CustomScriptStatus.GOOD.value,
+ CustomScriptStatus.BAD.value,
+ CustomScriptStatus.SKIP.value,
+ ),
+ )
+
+ args_output = parser.parse_args()
+
+    if not (
+        os.path.isfile(args_output.status_file)
+        and args_output.status_file.endswith(".json")
+    ):
+        raise ValueError(
+            'File does not exist or does not end in ".json": '
+            "%s" % args_output.status_file
+        )
+
+ if (
+ args_output.set_status == TryjobStatus.CUSTOM_SCRIPT.value
+ and not args_output.custom_script
+ ):
+ raise ValueError(
+ "Please provide the absolute path to the script to " "execute."
+ )
+
+ return args_output
def FindTryjobIndex(revision, tryjobs_list):
- """Searches the list of tryjob dictionaries to find 'revision'.
+ """Searches the list of tryjob dictionaries to find 'revision'.
- Uses the key 'rev' for each dictionary and compares the value against
- 'revision.'
+ Uses the key 'rev' for each dictionary and compares the value against
+ 'revision.'
- Args:
- revision: The revision to search for in the tryjobs.
- tryjobs_list: A list of tryjob dictionaries of the format:
- {
- 'rev' : [REVISION],
- 'url' : [URL_OF_CL],
- 'cl' : [CL_NUMBER],
- 'link' : [TRYJOB_LINK],
- 'status' : [TRYJOB_STATUS],
- 'buildbucket_id': [BUILDBUCKET_ID]
- }
+ Args:
+ revision: The revision to search for in the tryjobs.
+ tryjobs_list: A list of tryjob dictionaries of the format:
+ {
+ 'rev' : [REVISION],
+ 'url' : [URL_OF_CL],
+ 'cl' : [CL_NUMBER],
+ 'link' : [TRYJOB_LINK],
+ 'status' : [TRYJOB_STATUS],
+ 'buildbucket_id': [BUILDBUCKET_ID]
+ }
- Returns:
- The index within the list or None to indicate it was not found.
- """
+ Returns:
+ The index within the list or None to indicate it was not found.
+ """
- for cur_index, cur_tryjob_dict in enumerate(tryjobs_list):
- if cur_tryjob_dict['rev'] == revision:
- return cur_index
+ for cur_index, cur_tryjob_dict in enumerate(tryjobs_list):
+ if cur_tryjob_dict["rev"] == revision:
+ return cur_index
- return None
+ return None
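A quick usage example with illustrative values:

    jobs = [
        {"rev": 369411, "status": "pending", "buildbucket_id": 1200},
        {"rev": 369416, "status": "good", "buildbucket_id": 1201},
    ]
    assert FindTryjobIndex(369416, jobs) == 1
    assert FindTryjobIndex(369999, jobs) is None  # Not found.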
def GetCustomScriptResult(custom_script, status_file, tryjob_contents):
- """Returns the conversion of the exit code of the custom script.
-
- Args:
- custom_script: Absolute path to the script to be executed.
- status_file: Absolute path to the file that contains information about the
- bisection of LLVM.
- tryjob_contents: A dictionary of the contents of the tryjob (e.g. 'status',
- 'url', 'link', 'buildbucket_id', etc.).
-
- Returns:
- The exit code conversion to either return 'good', 'bad', or 'skip'.
-
- Raises:
- ValueError: The custom script failed to provide the correct exit code.
- """
-
- # Create a temporary file to write the contents of the tryjob at index
- # 'tryjob_index' (the temporary file path will be passed into the custom
- # script as a command line argument).
- with CreateTemporaryJsonFile() as temp_json_file:
- with open(temp_json_file, 'w') as tryjob_file:
- json.dump(tryjob_contents, tryjob_file, indent=4, separators=(',', ': '))
-
- exec_script_cmd = [custom_script, temp_json_file]
-
- # Execute the custom script to get the exit code.
- exec_script_cmd_obj = subprocess.Popen(
- exec_script_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
- _, stderr = exec_script_cmd_obj.communicate()
-
- # Invalid exit code by the custom script.
- if exec_script_cmd_obj.returncode not in custom_script_exit_value_mapping:
- # Save the .JSON file to the directory of 'status_file'.
- name_of_json_file = os.path.join(
- os.path.dirname(status_file), os.path.basename(temp_json_file))
-
- os.rename(temp_json_file, name_of_json_file)
-
- raise ValueError(
- 'Custom script %s exit code %d did not match '
- 'any of the expected exit codes: %d for "good", %d '
- 'for "bad", or %d for "skip".\nPlease check %s for information '
- 'about the tryjob: %s' %
- (custom_script, exec_script_cmd_obj.returncode,
- CustomScriptStatus.GOOD.value, CustomScriptStatus.BAD.value,
- CustomScriptStatus.SKIP.value, name_of_json_file, stderr))
-
- return custom_script_exit_value_mapping[exec_script_cmd_obj.returncode]
+ """Returns the conversion of the exit code of the custom script.
+
+ Args:
+ custom_script: Absolute path to the script to be executed.
+ status_file: Absolute path to the file that contains information about the
+ bisection of LLVM.
+ tryjob_contents: A dictionary of the contents of the tryjob (e.g. 'status',
+ 'url', 'link', 'buildbucket_id', etc.).
+
+ Returns:
+ The exit code conversion to either return 'good', 'bad', or 'skip'.
+
+ Raises:
+ ValueError: The custom script failed to provide the correct exit code.
+ """
+
+ # Create a temporary file to write the contents of the tryjob at index
+ # 'tryjob_index' (the temporary file path will be passed into the custom
+ # script as a command line argument).
+ with CreateTemporaryJsonFile() as temp_json_file:
+ with open(temp_json_file, "w") as tryjob_file:
+ json.dump(
+ tryjob_contents, tryjob_file, indent=4, separators=(",", ": ")
+ )
+
+ exec_script_cmd = [custom_script, temp_json_file]
+
+ # Execute the custom script to get the exit code.
+ exec_script_cmd_obj = subprocess.Popen(
+ exec_script_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE
+ )
+ _, stderr = exec_script_cmd_obj.communicate()
+
+ # Invalid exit code by the custom script.
+ if (
+ exec_script_cmd_obj.returncode
+ not in custom_script_exit_value_mapping
+ ):
+ # Save the .JSON file to the directory of 'status_file'.
+ name_of_json_file = os.path.join(
+ os.path.dirname(status_file), os.path.basename(temp_json_file)
+ )
+
+ os.rename(temp_json_file, name_of_json_file)
+
+ raise ValueError(
+ "Custom script %s exit code %d did not match "
+ 'any of the expected exit codes: %d for "good", %d '
+ 'for "bad", or %d for "skip".\nPlease check %s for information '
+ "about the tryjob: %s"
+ % (
+ custom_script,
+ exec_script_cmd_obj.returncode,
+ CustomScriptStatus.GOOD.value,
+ CustomScriptStatus.BAD.value,
+ CustomScriptStatus.SKIP.value,
+ name_of_json_file,
+ stderr,
+ )
+ )
+
+ return custom_script_exit_value_mapping[exec_script_cmd_obj.returncode]
def UpdateTryjobStatus(revision, set_status, status_file, custom_script):
- """Updates a tryjob's 'status' field based off of 'set_status'.
-
- Args:
- revision: The revision associated with the tryjob.
- set_status: What to update the 'status' field to.
- Ex: TryjobStatus.Good, TryjobStatus.BAD, TryjobStatus.PENDING, or
- TryjobStatus.
- status_file: The .JSON file that contains the tryjobs.
- custom_script: The absolute path to a script that will be executed which
- will determine the 'status' value of the tryjob.
- """
-
- # Format of 'bisect_contents':
- # {
- # 'start': [START_REVISION_OF_BISECTION]
- # 'end': [END_REVISION_OF_BISECTION]
- # 'jobs' : [
- # {[TRYJOB_INFORMATION]},
- # {[TRYJOB_INFORMATION]},
- # ...,
- # {[TRYJOB_INFORMATION]}
- # ]
- # }
- with open(status_file) as tryjobs:
- bisect_contents = json.load(tryjobs)
-
- if not bisect_contents['jobs']:
- sys.exit('No tryjobs in %s' % status_file)
-
- tryjob_index = FindTryjobIndex(revision, bisect_contents['jobs'])
-
- # 'FindTryjobIndex()' returns None if the revision was not found.
- if tryjob_index is None:
- raise ValueError('Unable to find tryjob for %d in %s' %
- (revision, status_file))
-
- # Set 'status' depending on 'set_status' for the tryjob.
- if set_status == TryjobStatus.GOOD:
- bisect_contents['jobs'][tryjob_index]['status'] = TryjobStatus.GOOD.value
- elif set_status == TryjobStatus.BAD:
- bisect_contents['jobs'][tryjob_index]['status'] = TryjobStatus.BAD.value
- elif set_status == TryjobStatus.PENDING:
- bisect_contents['jobs'][tryjob_index]['status'] = TryjobStatus.PENDING.value
- elif set_status == TryjobStatus.SKIP:
- bisect_contents['jobs'][tryjob_index]['status'] = TryjobStatus.SKIP.value
- elif set_status == TryjobStatus.CUSTOM_SCRIPT:
- bisect_contents['jobs'][tryjob_index]['status'] = GetCustomScriptResult(
- custom_script, status_file, bisect_contents['jobs'][tryjob_index])
- else:
- raise ValueError('Invalid "set_status" option provided: %s' % set_status)
-
- with open(status_file, 'w') as update_tryjobs:
- json.dump(bisect_contents, update_tryjobs, indent=4, separators=(',', ': '))
+ """Updates a tryjob's 'status' field based off of 'set_status'.
+
+ Args:
+ revision: The revision associated with the tryjob.
+ set_status: What to update the 'status' field to.
+          Ex: TryjobStatus.GOOD, TryjobStatus.BAD, TryjobStatus.PENDING,
+          TryjobStatus.SKIP, or TryjobStatus.CUSTOM_SCRIPT.
+ status_file: The .JSON file that contains the tryjobs.
+ custom_script: The absolute path to a script that will be executed which
+ will determine the 'status' value of the tryjob.
+ """
+
+ # Format of 'bisect_contents':
+ # {
+ # 'start': [START_REVISION_OF_BISECTION]
+ # 'end': [END_REVISION_OF_BISECTION]
+ # 'jobs' : [
+ # {[TRYJOB_INFORMATION]},
+ # {[TRYJOB_INFORMATION]},
+ # ...,
+ # {[TRYJOB_INFORMATION]}
+ # ]
+ # }
+ with open(status_file) as tryjobs:
+ bisect_contents = json.load(tryjobs)
+
+ if not bisect_contents["jobs"]:
+ sys.exit("No tryjobs in %s" % status_file)
+
+ tryjob_index = FindTryjobIndex(revision, bisect_contents["jobs"])
+
+ # 'FindTryjobIndex()' returns None if the revision was not found.
+ if tryjob_index is None:
+ raise ValueError(
+ "Unable to find tryjob for %d in %s" % (revision, status_file)
+ )
+
+ # Set 'status' depending on 'set_status' for the tryjob.
+ if set_status == TryjobStatus.GOOD:
+ bisect_contents["jobs"][tryjob_index][
+ "status"
+ ] = TryjobStatus.GOOD.value
+ elif set_status == TryjobStatus.BAD:
+ bisect_contents["jobs"][tryjob_index]["status"] = TryjobStatus.BAD.value
+ elif set_status == TryjobStatus.PENDING:
+ bisect_contents["jobs"][tryjob_index][
+ "status"
+ ] = TryjobStatus.PENDING.value
+ elif set_status == TryjobStatus.SKIP:
+ bisect_contents["jobs"][tryjob_index][
+ "status"
+ ] = TryjobStatus.SKIP.value
+ elif set_status == TryjobStatus.CUSTOM_SCRIPT:
+ bisect_contents["jobs"][tryjob_index]["status"] = GetCustomScriptResult(
+ custom_script, status_file, bisect_contents["jobs"][tryjob_index]
+ )
+ else:
+ raise ValueError(
+ 'Invalid "set_status" option provided: %s' % set_status
+ )
+
+ with open(status_file, "w") as update_tryjobs:
+ json.dump(
+ bisect_contents, update_tryjobs, indent=4, separators=(",", ": ")
+ )
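A typical call, with an illustrative revision and status-file path:

    # Mark revision 369411 as 'good' in an existing bisection status file.
    UpdateTryjobStatus(
        revision=369411,
        set_status=TryjobStatus.GOOD,
        status_file="/abs/path/to/llvm_bisect_tryjobs.json",
        custom_script=None,
    )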
def main():
- """Updates the status of a tryjob."""
+ """Updates the status of a tryjob."""
- chroot.VerifyOutsideChroot()
+ chroot.VerifyOutsideChroot()
- args_output = GetCommandLineArgs()
+ args_output = GetCommandLineArgs()
- UpdateTryjobStatus(args_output.revision, TryjobStatus(args_output.set_status),
- args_output.status_file, args_output.custom_script)
+ UpdateTryjobStatus(
+ args_output.revision,
+ TryjobStatus(args_output.set_status),
+ args_output.status_file,
+ args_output.custom_script,
+ )
-if __name__ == '__main__':
- main()
+if __name__ == "__main__":
+ main()
diff --git a/llvm_tools/update_tryjob_status_unittest.py b/llvm_tools/update_tryjob_status_unittest.py
index c42c6718..fd9250a3 100755
--- a/llvm_tools/update_tryjob_status_unittest.py
+++ b/llvm_tools/update_tryjob_status_unittest.py
@@ -1,12 +1,11 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
-# Copyright 2019 The Chromium OS Authors. All rights reserved.
+# Copyright 2019 The ChromiumOS Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Tests when updating a tryjob's status."""
-from __future__ import print_function
import json
import os
@@ -16,462 +15,522 @@ import unittest.mock as mock
from test_helpers import CreateTemporaryJsonFile
from test_helpers import WritePrettyJsonFile
-from update_tryjob_status import TryjobStatus
-from update_tryjob_status import CustomScriptStatus
import update_tryjob_status
+from update_tryjob_status import CustomScriptStatus
+from update_tryjob_status import TryjobStatus
class UpdateTryjobStatusTest(unittest.TestCase):
- """Unittests for updating a tryjob's 'status'."""
-
- def testFoundTryjobIndex(self):
- test_tryjobs = [{
- 'rev': 123,
- 'url': 'https://some_url_to_CL.com',
- 'cl': 'https://some_link_to_tryjob.com',
- 'status': 'good',
- 'buildbucket_id': 91835
- }, {
- 'rev': 1000,
- 'url': 'https://some_url_to_CL.com',
- 'cl': 'https://some_link_to_tryjob.com',
- 'status': 'pending',
- 'buildbucket_id': 10931
- }]
-
- expected_index = 0
-
- revision_to_find = 123
-
- self.assertEqual(
- update_tryjob_status.FindTryjobIndex(revision_to_find, test_tryjobs),
- expected_index)
-
- def testNotFindTryjobIndex(self):
- test_tryjobs = [{
- 'rev': 500,
- 'url': 'https://some_url_to_CL.com',
- 'cl': 'https://some_link_to_tryjob.com',
- 'status': 'bad',
- 'buildbucket_id': 390
- }, {
- 'rev': 10,
- 'url': 'https://some_url_to_CL.com',
- 'cl': 'https://some_link_to_tryjob.com',
- 'status': 'skip',
- 'buildbucket_id': 10
- }]
-
- revision_to_find = 250
-
- self.assertIsNone(
- update_tryjob_status.FindTryjobIndex(revision_to_find, test_tryjobs))
-
- @mock.patch.object(subprocess, 'Popen')
- # Simulate the behavior of `os.rename()` when successfully renamed a file.
- @mock.patch.object(os, 'rename', return_value=None)
- # Simulate the behavior of `os.path.basename()` when successfully retrieved
- # the basename of the temp .JSON file.
- @mock.patch.object(os.path, 'basename', return_value='tmpFile.json')
- def testInvalidExitCodeByCustomScript(self, mock_basename, mock_rename_file,
- mock_exec_custom_script):
-
- error_message_by_custom_script = 'Failed to parse .JSON file'
-
- # Simulate the behavior of 'subprocess.Popen()' when executing the custom
- # script.
- #
- # `Popen.communicate()` returns a tuple of `stdout` and `stderr`.
- mock_exec_custom_script.return_value.communicate.return_value = (
- None, error_message_by_custom_script)
-
- # Exit code of 1 is not in the mapping, so an exception will be raised.
- custom_script_exit_code = 1
-
- mock_exec_custom_script.return_value.returncode = custom_script_exit_code
-
- tryjob_contents = {
- 'status': 'good',
- 'rev': 1234,
- 'url': 'https://some_url_to_CL.com',
- 'link': 'https://some_url_to_tryjob.com'
- }
-
- custom_script_path = '/abs/path/to/script.py'
- status_file_path = '/abs/path/to/status_file.json'
-
- name_json_file = os.path.join(
- os.path.dirname(status_file_path), 'tmpFile.json')
-
- expected_error_message = (
- 'Custom script %s exit code %d did not match '
- 'any of the expected exit codes: %s for "good", '
- '%d for "bad", or %d for "skip".\nPlease check '
- '%s for information about the tryjob: %s' %
- (custom_script_path, custom_script_exit_code,
- CustomScriptStatus.GOOD.value, CustomScriptStatus.BAD.value,
- CustomScriptStatus.SKIP.value, name_json_file,
- error_message_by_custom_script))
-
- # Verify the exception is raised when the exit code by the custom script
- # does not match any of the exit codes in the mapping of
- # `custom_script_exit_value_mapping`.
- with self.assertRaises(ValueError) as err:
- update_tryjob_status.GetCustomScriptResult(custom_script_path,
- status_file_path,
- tryjob_contents)
-
- self.assertEqual(str(err.exception), expected_error_message)
-
- mock_exec_custom_script.assert_called_once()
-
- mock_rename_file.assert_called_once()
-
- mock_basename.assert_called_once()
-
- @mock.patch.object(subprocess, 'Popen')
- # Simulate the behavior of `os.rename()` when successfully renamed a file.
- @mock.patch.object(os, 'rename', return_value=None)
- # Simulate the behavior of `os.path.basename()` when successfully retrieved
- # the basename of the temp .JSON file.
- @mock.patch.object(os.path, 'basename', return_value='tmpFile.json')
- def testValidExitCodeByCustomScript(self, mock_basename, mock_rename_file,
- mock_exec_custom_script):
-
- # Simulate the behavior of 'subprocess.Popen()' when executing the custom
- # script.
- #
- # `Popen.communicate()` returns a tuple of `stdout` and `stderr`.
- mock_exec_custom_script.return_value.communicate.return_value = (None, None)
-
- mock_exec_custom_script.return_value.returncode = (
- CustomScriptStatus.GOOD.value)
-
- tryjob_contents = {
- 'status': 'good',
- 'rev': 1234,
- 'url': 'https://some_url_to_CL.com',
- 'link': 'https://some_url_to_tryjob.com'
- }
-
- custom_script_path = '/abs/path/to/script.py'
- status_file_path = '/abs/path/to/status_file.json'
-
- self.assertEqual(
- update_tryjob_status.GetCustomScriptResult(custom_script_path,
- status_file_path,
- tryjob_contents),
- TryjobStatus.GOOD.value)
-
- mock_exec_custom_script.assert_called_once()
-
- mock_rename_file.assert_not_called()
-
- mock_basename.assert_not_called()
-
- def testNoTryjobsInStatusFileWhenUpdatingTryjobStatus(self):
- bisect_test_contents = {'start': 369410, 'end': 369420, 'jobs': []}
-
- # Create a temporary .JSON file to simulate a .JSON file that has bisection
- # contents.
- with CreateTemporaryJsonFile() as temp_json_file:
- with open(temp_json_file, 'w') as f:
- WritePrettyJsonFile(bisect_test_contents, f)
-
- revision_to_update = 369412
-
- custom_script = None
-
- # Verify the exception is raised when the `status_file` does not have any
- # `jobs` (empty).
- with self.assertRaises(SystemExit) as err:
- update_tryjob_status.UpdateTryjobStatus(revision_to_update,
- TryjobStatus.GOOD,
- temp_json_file, custom_script)
-
- self.assertEqual(str(err.exception), 'No tryjobs in %s' % temp_json_file)
-
- # Simulate the behavior of `FindTryjobIndex()` when the tryjob does not exist
- # in the status file.
- @mock.patch.object(update_tryjob_status, 'FindTryjobIndex', return_value=None)
- def testNotFindTryjobIndexWhenUpdatingTryjobStatus(self,
- mock_find_tryjob_index):
-
- bisect_test_contents = {
- 'start': 369410,
- 'end': 369420,
- 'jobs': [{
- 'rev': 369411,
- 'status': 'pending'
- }]
- }
-
- # Create a temporary .JSON file to simulate a .JSON file that has bisection
- # contents.
- with CreateTemporaryJsonFile() as temp_json_file:
- with open(temp_json_file, 'w') as f:
- WritePrettyJsonFile(bisect_test_contents, f)
-
- revision_to_update = 369416
-
- custom_script = None
-
- # Verify the exception is raised when the `status_file` does not have any
- # `jobs` (empty).
- with self.assertRaises(ValueError) as err:
- update_tryjob_status.UpdateTryjobStatus(revision_to_update,
- TryjobStatus.SKIP,
- temp_json_file, custom_script)
-
- self.assertEqual(
- str(err.exception), 'Unable to find tryjob for %d in %s' %
- (revision_to_update, temp_json_file))
-
- mock_find_tryjob_index.assert_called_once()
-
- # Simulate the behavior of `FindTryjobIndex()` when the tryjob exists in the
- # status file.
- @mock.patch.object(update_tryjob_status, 'FindTryjobIndex', return_value=0)
- def testSuccessfullyUpdatedTryjobStatusToGood(self, mock_find_tryjob_index):
- bisect_test_contents = {
- 'start': 369410,
- 'end': 369420,
- 'jobs': [{
- 'rev': 369411,
- 'status': 'pending'
- }]
- }
-
- # Create a temporary .JSON file to simulate a .JSON file that has bisection
- # contents.
- with CreateTemporaryJsonFile() as temp_json_file:
- with open(temp_json_file, 'w') as f:
- WritePrettyJsonFile(bisect_test_contents, f)
-
- revision_to_update = 369411
-
- # Index of the tryjob that is going to have its 'status' value updated.
- tryjob_index = 0
-
- custom_script = None
-
- update_tryjob_status.UpdateTryjobStatus(revision_to_update,
- TryjobStatus.GOOD, temp_json_file,
- custom_script)
-
- # Verify that the tryjob's 'status' has been updated in the status file.
- with open(temp_json_file) as status_file:
- bisect_contents = json.load(status_file)
-
- self.assertEqual(bisect_contents['jobs'][tryjob_index]['status'],
- TryjobStatus.GOOD.value)
-
- mock_find_tryjob_index.assert_called_once()
-
- # Simulate the behavior of `FindTryjobIndex()` when the tryjob exists in the
- # status file.
- @mock.patch.object(update_tryjob_status, 'FindTryjobIndex', return_value=0)
- def testSuccessfullyUpdatedTryjobStatusToBad(self, mock_find_tryjob_index):
- bisect_test_contents = {
- 'start': 369410,
- 'end': 369420,
- 'jobs': [{
- 'rev': 369411,
- 'status': 'pending'
- }]
- }
-
- # Create a temporary .JSON file to simulate a .JSON file that has bisection
- # contents.
- with CreateTemporaryJsonFile() as temp_json_file:
- with open(temp_json_file, 'w') as f:
- WritePrettyJsonFile(bisect_test_contents, f)
-
- revision_to_update = 369411
-
- # Index of the tryjob that is going to have its 'status' value updated.
- tryjob_index = 0
-
- custom_script = None
-
- update_tryjob_status.UpdateTryjobStatus(revision_to_update,
- TryjobStatus.BAD, temp_json_file,
- custom_script)
-
- # Verify that the tryjob's 'status' has been updated in the status file.
- with open(temp_json_file) as status_file:
- bisect_contents = json.load(status_file)
-
- self.assertEqual(bisect_contents['jobs'][tryjob_index]['status'],
- TryjobStatus.BAD.value)
-
- mock_find_tryjob_index.assert_called_once()
-
- # Simulate the behavior of `FindTryjobIndex()` when the tryjob exists in the
- # status file.
- @mock.patch.object(update_tryjob_status, 'FindTryjobIndex', return_value=0)
- def testSuccessfullyUpdatedTryjobStatusToPending(self,
- mock_find_tryjob_index):
- bisect_test_contents = {
- 'start': 369410,
- 'end': 369420,
- 'jobs': [{
- 'rev': 369411,
- 'status': 'skip'
- }]
- }
-
- # Create a temporary .JSON file to simulate a .JSON file that has bisection
- # contents.
- with CreateTemporaryJsonFile() as temp_json_file:
- with open(temp_json_file, 'w') as f:
- WritePrettyJsonFile(bisect_test_contents, f)
-
- revision_to_update = 369411
-
- # Index of the tryjob that is going to have its 'status' value updated.
- tryjob_index = 0
-
- custom_script = None
-
- update_tryjob_status.UpdateTryjobStatus(
- revision_to_update, update_tryjob_status.TryjobStatus.SKIP,
- temp_json_file, custom_script)
-
- # Verify that the tryjob's 'status' has been updated in the status file.
- with open(temp_json_file) as status_file:
- bisect_contents = json.load(status_file)
-
- self.assertEqual(bisect_contents['jobs'][tryjob_index]['status'],
- update_tryjob_status.TryjobStatus.SKIP.value)
-
- mock_find_tryjob_index.assert_called_once()
-
- # Simulate the behavior of `FindTryjobIndex()` when the tryjob exists in the
- # status file.
- @mock.patch.object(update_tryjob_status, 'FindTryjobIndex', return_value=0)
- def testSuccessfullyUpdatedTryjobStatusToSkip(self, mock_find_tryjob_index):
- bisect_test_contents = {
- 'start': 369410,
- 'end': 369420,
- 'jobs': [{
- 'rev': 369411,
- 'status': 'pending',
- }]
- }
-
- # Create a temporary .JSON file to simulate a .JSON file that has bisection
- # contents.
- with CreateTemporaryJsonFile() as temp_json_file:
- with open(temp_json_file, 'w') as f:
- WritePrettyJsonFile(bisect_test_contents, f)
-
- revision_to_update = 369411
-
- # Index of the tryjob that is going to have its 'status' value updated.
- tryjob_index = 0
-
- custom_script = None
-
- update_tryjob_status.UpdateTryjobStatus(
- revision_to_update, update_tryjob_status.TryjobStatus.PENDING,
- temp_json_file, custom_script)
-
- # Verify that the tryjob's 'status' has been updated in the status file.
- with open(temp_json_file) as status_file:
- bisect_contents = json.load(status_file)
-
- self.assertEqual(bisect_contents['jobs'][tryjob_index]['status'],
- update_tryjob_status.TryjobStatus.PENDING.value)
-
- mock_find_tryjob_index.assert_called_once()
-
- @mock.patch.object(update_tryjob_status, 'FindTryjobIndex', return_value=0)
- @mock.patch.object(
- update_tryjob_status,
- 'GetCustomScriptResult',
- return_value=TryjobStatus.SKIP.value)
- def testUpdatedTryjobStatusToAutoPassedWithCustomScript(
- self, mock_get_custom_script_result, mock_find_tryjob_index):
- bisect_test_contents = {
- 'start': 369410,
- 'end': 369420,
- 'jobs': [{
- 'rev': 369411,
- 'status': 'pending',
- 'buildbucket_id': 1200
- }]
- }
-
- # Create a temporary .JSON file to simulate a .JSON file that has bisection
- # contents.
- with CreateTemporaryJsonFile() as temp_json_file:
- with open(temp_json_file, 'w') as f:
- WritePrettyJsonFile(bisect_test_contents, f)
-
- revision_to_update = 369411
-
- # Index of the tryjob that is going to have its 'status' value updated.
- tryjob_index = 0
-
- custom_script_path = '/abs/path/to/custom_script.py'
-
- update_tryjob_status.UpdateTryjobStatus(
- revision_to_update, update_tryjob_status.TryjobStatus.CUSTOM_SCRIPT,
- temp_json_file, custom_script_path)
-
- # Verify that the tryjob's 'status' has been updated in the status file.
- with open(temp_json_file) as status_file:
- bisect_contents = json.load(status_file)
-
- self.assertEqual(bisect_contents['jobs'][tryjob_index]['status'],
- update_tryjob_status.TryjobStatus.SKIP.value)
-
- mock_get_custom_script_result.assert_called_once()
-
- mock_find_tryjob_index.assert_called_once()
-
- # Simulate the behavior of `FindTryjobIndex()` when the tryjob exists in the
- # status file.
- @mock.patch.object(update_tryjob_status, 'FindTryjobIndex', return_value=0)
- def testSetStatusDoesNotExistWhenUpdatingTryjobStatus(self,
- mock_find_tryjob_index):
-
- bisect_test_contents = {
- 'start': 369410,
- 'end': 369420,
- 'jobs': [{
- 'rev': 369411,
- 'status': 'pending',
- 'buildbucket_id': 1200
- }]
- }
-
- # Create a temporary .JSON file to simulate a .JSON file that has bisection
- # contents.
- with CreateTemporaryJsonFile() as temp_json_file:
- with open(temp_json_file, 'w') as f:
- WritePrettyJsonFile(bisect_test_contents, f)
-
- revision_to_update = 369411
-
- nonexistent_update_status = 'revert_status'
-
- custom_script = None
-
- # Verify the exception is raised when the `set_status` command line
- # argument does not exist in the mapping.
- with self.assertRaises(ValueError) as err:
- update_tryjob_status.UpdateTryjobStatus(revision_to_update,
- nonexistent_update_status,
- temp_json_file, custom_script)
-
- self.assertEqual(
- str(err.exception),
- 'Invalid "set_status" option provided: revert_status')
-
- mock_find_tryjob_index.assert_called_once()
-
-
-if __name__ == '__main__':
- unittest.main()
+ """Unittests for updating a tryjob's 'status'."""
+
+ def testFoundTryjobIndex(self):
+ test_tryjobs = [
+ {
+ "rev": 123,
+ "url": "https://some_url_to_CL.com",
+ "cl": "https://some_link_to_tryjob.com",
+ "status": "good",
+ "buildbucket_id": 91835,
+ },
+ {
+ "rev": 1000,
+ "url": "https://some_url_to_CL.com",
+ "cl": "https://some_link_to_tryjob.com",
+ "status": "pending",
+ "buildbucket_id": 10931,
+ },
+ ]
+
+ expected_index = 0
+
+ revision_to_find = 123
+
+ self.assertEqual(
+ update_tryjob_status.FindTryjobIndex(
+ revision_to_find, test_tryjobs
+ ),
+ expected_index,
+ )
+
+ def testNotFindTryjobIndex(self):
+ test_tryjobs = [
+ {
+ "rev": 500,
+ "url": "https://some_url_to_CL.com",
+ "cl": "https://some_link_to_tryjob.com",
+ "status": "bad",
+ "buildbucket_id": 390,
+ },
+ {
+ "rev": 10,
+ "url": "https://some_url_to_CL.com",
+ "cl": "https://some_link_to_tryjob.com",
+ "status": "skip",
+ "buildbucket_id": 10,
+ },
+ ]
+
+ revision_to_find = 250
+
+ self.assertIsNone(
+ update_tryjob_status.FindTryjobIndex(revision_to_find, test_tryjobs)
+ )
+
+ @mock.patch.object(subprocess, "Popen")
+ # Simulate the behavior of `os.rename()` when successfully renamed a file.
+ @mock.patch.object(os, "rename", return_value=None)
+ # Simulate the behavior of `os.path.basename()` when successfully retrieved
+ # the basename of the temp .JSON file.
+ @mock.patch.object(os.path, "basename", return_value="tmpFile.json")
+ def testInvalidExitCodeByCustomScript(
+ self, mock_basename, mock_rename_file, mock_exec_custom_script
+ ):
+
+ error_message_by_custom_script = "Failed to parse .JSON file"
+
+ # Simulate the behavior of 'subprocess.Popen()' when executing the custom
+ # script.
+ #
+ # `Popen.communicate()` returns a tuple of `stdout` and `stderr`.
+ mock_exec_custom_script.return_value.communicate.return_value = (
+ None,
+ error_message_by_custom_script,
+ )
+
+ # Exit code of 1 is not in the mapping, so an exception will be raised.
+ custom_script_exit_code = 1
+
+ mock_exec_custom_script.return_value.returncode = (
+ custom_script_exit_code
+ )
+
+ tryjob_contents = {
+ "status": "good",
+ "rev": 1234,
+ "url": "https://some_url_to_CL.com",
+ "link": "https://some_url_to_tryjob.com",
+ }
+
+ custom_script_path = "/abs/path/to/script.py"
+ status_file_path = "/abs/path/to/status_file.json"
+
+ name_json_file = os.path.join(
+ os.path.dirname(status_file_path), "tmpFile.json"
+ )
+
+ expected_error_message = (
+ "Custom script %s exit code %d did not match "
+ 'any of the expected exit codes: %s for "good", '
+ '%d for "bad", or %d for "skip".\nPlease check '
+ "%s for information about the tryjob: %s"
+ % (
+ custom_script_path,
+ custom_script_exit_code,
+ CustomScriptStatus.GOOD.value,
+ CustomScriptStatus.BAD.value,
+ CustomScriptStatus.SKIP.value,
+ name_json_file,
+ error_message_by_custom_script,
+ )
+ )
+
+ # Verify the exception is raised when the exit code by the custom script
+ # does not match any of the exit codes in the mapping of
+ # `custom_script_exit_value_mapping`.
+ with self.assertRaises(ValueError) as err:
+ update_tryjob_status.GetCustomScriptResult(
+ custom_script_path, status_file_path, tryjob_contents
+ )
+
+ self.assertEqual(str(err.exception), expected_error_message)
+
+ mock_exec_custom_script.assert_called_once()
+
+ mock_rename_file.assert_called_once()
+
+ mock_basename.assert_called_once()
+
+ @mock.patch.object(subprocess, "Popen")
+    # Simulate `os.rename()` successfully renaming a file.
+ @mock.patch.object(os, "rename", return_value=None)
+    # Simulate `os.path.basename()` retrieving the temp .JSON file's basename.
+ @mock.patch.object(os.path, "basename", return_value="tmpFile.json")
+ def testValidExitCodeByCustomScript(
+ self, mock_basename, mock_rename_file, mock_exec_custom_script
+ ):
+
+ # Simulate the behavior of 'subprocess.Popen()' when executing the custom
+ # script.
+ #
+ # `Popen.communicate()` returns a tuple of `stdout` and `stderr`.
+ mock_exec_custom_script.return_value.communicate.return_value = (
+ None,
+ None,
+ )
+
+ mock_exec_custom_script.return_value.returncode = (
+ CustomScriptStatus.GOOD.value
+ )
+
+ tryjob_contents = {
+ "status": "good",
+ "rev": 1234,
+ "url": "https://some_url_to_CL.com",
+ "link": "https://some_url_to_tryjob.com",
+ }
+
+ custom_script_path = "/abs/path/to/script.py"
+ status_file_path = "/abs/path/to/status_file.json"
+
+ self.assertEqual(
+ update_tryjob_status.GetCustomScriptResult(
+ custom_script_path, status_file_path, tryjob_contents
+ ),
+ TryjobStatus.GOOD.value,
+ )
+
+ mock_exec_custom_script.assert_called_once()
+
+ mock_rename_file.assert_not_called()
+
+ mock_basename.assert_not_called()
+
+ def testNoTryjobsInStatusFileWhenUpdatingTryjobStatus(self):
+ bisect_test_contents = {"start": 369410, "end": 369420, "jobs": []}
+
+        # Create a temporary .JSON file with bisection contents.
+ with CreateTemporaryJsonFile() as temp_json_file:
+ with open(temp_json_file, "w") as f:
+ WritePrettyJsonFile(bisect_test_contents, f)
+
+ revision_to_update = 369412
+
+ custom_script = None
+
+        # Verify the exception is raised when the `status_file` has no `jobs`.
+ with self.assertRaises(SystemExit) as err:
+ update_tryjob_status.UpdateTryjobStatus(
+ revision_to_update,
+ TryjobStatus.GOOD,
+ temp_json_file,
+ custom_script,
+ )
+
+ self.assertEqual(
+ str(err.exception), "No tryjobs in %s" % temp_json_file
+ )
+
+ # Simulate the behavior of `FindTryjobIndex()` when the tryjob does not exist
+ # in the status file.
+ @mock.patch.object(
+ update_tryjob_status, "FindTryjobIndex", return_value=None
+ )
+ def testNotFindTryjobIndexWhenUpdatingTryjobStatus(
+ self, mock_find_tryjob_index
+ ):
+
+ bisect_test_contents = {
+ "start": 369410,
+ "end": 369420,
+ "jobs": [{"rev": 369411, "status": "pending"}],
+ }
+
+        # Create a temporary .JSON file with bisection contents.
+ with CreateTemporaryJsonFile() as temp_json_file:
+ with open(temp_json_file, "w") as f:
+ WritePrettyJsonFile(bisect_test_contents, f)
+
+ revision_to_update = 369416
+
+ custom_script = None
+
+        # Verify the exception is raised when the tryjob cannot be found in
+        # the `status_file`.
+ with self.assertRaises(ValueError) as err:
+ update_tryjob_status.UpdateTryjobStatus(
+ revision_to_update,
+ TryjobStatus.SKIP,
+ temp_json_file,
+ custom_script,
+ )
+
+ self.assertEqual(
+ str(err.exception),
+ "Unable to find tryjob for %d in %s"
+ % (revision_to_update, temp_json_file),
+ )
+
+ mock_find_tryjob_index.assert_called_once()
+
+ # Simulate the behavior of `FindTryjobIndex()` when the tryjob exists in the
+ # status file.
+ @mock.patch.object(update_tryjob_status, "FindTryjobIndex", return_value=0)
+ def testSuccessfullyUpdatedTryjobStatusToGood(self, mock_find_tryjob_index):
+ bisect_test_contents = {
+ "start": 369410,
+ "end": 369420,
+ "jobs": [{"rev": 369411, "status": "pending"}],
+ }
+
+        # Create a temporary .JSON file with bisection contents.
+ with CreateTemporaryJsonFile() as temp_json_file:
+ with open(temp_json_file, "w") as f:
+ WritePrettyJsonFile(bisect_test_contents, f)
+
+ revision_to_update = 369411
+
+ # Index of the tryjob that is going to have its 'status' value updated.
+ tryjob_index = 0
+
+ custom_script = None
+
+ update_tryjob_status.UpdateTryjobStatus(
+ revision_to_update,
+ TryjobStatus.GOOD,
+ temp_json_file,
+ custom_script,
+ )
+
+ # Verify that the tryjob's 'status' has been updated in the status file.
+ with open(temp_json_file) as status_file:
+ bisect_contents = json.load(status_file)
+
+ self.assertEqual(
+ bisect_contents["jobs"][tryjob_index]["status"],
+ TryjobStatus.GOOD.value,
+ )
+
+ mock_find_tryjob_index.assert_called_once()
+
+ # Simulate the behavior of `FindTryjobIndex()` when the tryjob exists in the
+ # status file.
+ @mock.patch.object(update_tryjob_status, "FindTryjobIndex", return_value=0)
+ def testSuccessfullyUpdatedTryjobStatusToBad(self, mock_find_tryjob_index):
+ bisect_test_contents = {
+ "start": 369410,
+ "end": 369420,
+ "jobs": [{"rev": 369411, "status": "pending"}],
+ }
+
+        # Create a temporary .JSON file with bisection contents.
+ with CreateTemporaryJsonFile() as temp_json_file:
+ with open(temp_json_file, "w") as f:
+ WritePrettyJsonFile(bisect_test_contents, f)
+
+ revision_to_update = 369411
+
+ # Index of the tryjob that is going to have its 'status' value updated.
+ tryjob_index = 0
+
+ custom_script = None
+
+ update_tryjob_status.UpdateTryjobStatus(
+ revision_to_update,
+ TryjobStatus.BAD,
+ temp_json_file,
+ custom_script,
+ )
+
+ # Verify that the tryjob's 'status' has been updated in the status file.
+ with open(temp_json_file) as status_file:
+ bisect_contents = json.load(status_file)
+
+ self.assertEqual(
+ bisect_contents["jobs"][tryjob_index]["status"],
+ TryjobStatus.BAD.value,
+ )
+
+ mock_find_tryjob_index.assert_called_once()
+
+ # Simulate the behavior of `FindTryjobIndex()` when the tryjob exists in the
+ # status file.
+ @mock.patch.object(update_tryjob_status, "FindTryjobIndex", return_value=0)
+ def testSuccessfullyUpdatedTryjobStatusToPending(
+ self, mock_find_tryjob_index
+ ):
+ bisect_test_contents = {
+ "start": 369410,
+ "end": 369420,
+ "jobs": [{"rev": 369411, "status": "skip"}],
+ }
+
+        # Create a temporary .JSON file with bisection contents.
+ with CreateTemporaryJsonFile() as temp_json_file:
+ with open(temp_json_file, "w") as f:
+ WritePrettyJsonFile(bisect_test_contents, f)
+
+ revision_to_update = 369411
+
+ # Index of the tryjob that is going to have its 'status' value updated.
+ tryjob_index = 0
+
+ custom_script = None
+
+ update_tryjob_status.UpdateTryjobStatus(
+ revision_to_update,
+            update_tryjob_status.TryjobStatus.PENDING,
+ temp_json_file,
+ custom_script,
+ )
+
+ # Verify that the tryjob's 'status' has been updated in the status file.
+ with open(temp_json_file) as status_file:
+ bisect_contents = json.load(status_file)
+
+ self.assertEqual(
+ bisect_contents["jobs"][tryjob_index]["status"],
+            update_tryjob_status.TryjobStatus.PENDING.value,
+ )
+
+ mock_find_tryjob_index.assert_called_once()
+
+ # Simulate the behavior of `FindTryjobIndex()` when the tryjob exists in the
+ # status file.
+ @mock.patch.object(update_tryjob_status, "FindTryjobIndex", return_value=0)
+ def testSuccessfullyUpdatedTryjobStatusToSkip(self, mock_find_tryjob_index):
+ bisect_test_contents = {
+ "start": 369410,
+ "end": 369420,
+ "jobs": [
+ {
+ "rev": 369411,
+ "status": "pending",
+ }
+ ],
+ }
+
+        # Create a temporary .JSON file with bisection contents.
+ with CreateTemporaryJsonFile() as temp_json_file:
+ with open(temp_json_file, "w") as f:
+ WritePrettyJsonFile(bisect_test_contents, f)
+
+ revision_to_update = 369411
+
+ # Index of the tryjob that is going to have its 'status' value updated.
+ tryjob_index = 0
+
+ custom_script = None
+
+ update_tryjob_status.UpdateTryjobStatus(
+ revision_to_update,
+            update_tryjob_status.TryjobStatus.SKIP,
+ temp_json_file,
+ custom_script,
+ )
+
+ # Verify that the tryjob's 'status' has been updated in the status file.
+ with open(temp_json_file) as status_file:
+ bisect_contents = json.load(status_file)
+
+ self.assertEqual(
+ bisect_contents["jobs"][tryjob_index]["status"],
+            update_tryjob_status.TryjobStatus.SKIP.value,
+ )
+
+ mock_find_tryjob_index.assert_called_once()
+
+ @mock.patch.object(update_tryjob_status, "FindTryjobIndex", return_value=0)
+ @mock.patch.object(
+ update_tryjob_status,
+ "GetCustomScriptResult",
+ return_value=TryjobStatus.SKIP.value,
+ )
+ def testUpdatedTryjobStatusToAutoPassedWithCustomScript(
+ self, mock_get_custom_script_result, mock_find_tryjob_index
+ ):
+ bisect_test_contents = {
+ "start": 369410,
+ "end": 369420,
+ "jobs": [
+ {"rev": 369411, "status": "pending", "buildbucket_id": 1200}
+ ],
+ }
+
+        # Create a temporary .JSON file with bisection contents.
+ with CreateTemporaryJsonFile() as temp_json_file:
+ with open(temp_json_file, "w") as f:
+ WritePrettyJsonFile(bisect_test_contents, f)
+
+ revision_to_update = 369411
+
+ # Index of the tryjob that is going to have its 'status' value updated.
+ tryjob_index = 0
+
+ custom_script_path = "/abs/path/to/custom_script.py"
+
+ update_tryjob_status.UpdateTryjobStatus(
+ revision_to_update,
+ update_tryjob_status.TryjobStatus.CUSTOM_SCRIPT,
+ temp_json_file,
+ custom_script_path,
+ )
+
+ # Verify that the tryjob's 'status' has been updated in the status file.
+ with open(temp_json_file) as status_file:
+ bisect_contents = json.load(status_file)
+
+ self.assertEqual(
+ bisect_contents["jobs"][tryjob_index]["status"],
+ update_tryjob_status.TryjobStatus.SKIP.value,
+ )
+
+ mock_get_custom_script_result.assert_called_once()
+
+ mock_find_tryjob_index.assert_called_once()
+
+ # Simulate the behavior of `FindTryjobIndex()` when the tryjob exists in the
+ # status file.
+ @mock.patch.object(update_tryjob_status, "FindTryjobIndex", return_value=0)
+ def testSetStatusDoesNotExistWhenUpdatingTryjobStatus(
+ self, mock_find_tryjob_index
+ ):
+
+ bisect_test_contents = {
+ "start": 369410,
+ "end": 369420,
+ "jobs": [
+ {"rev": 369411, "status": "pending", "buildbucket_id": 1200}
+ ],
+ }
+
+        # Create a temporary .JSON file with bisection contents.
+ with CreateTemporaryJsonFile() as temp_json_file:
+ with open(temp_json_file, "w") as f:
+ WritePrettyJsonFile(bisect_test_contents, f)
+
+ revision_to_update = 369411
+
+ nonexistent_update_status = "revert_status"
+
+ custom_script = None
+
+ # Verify the exception is raised when the `set_status` command line
+ # argument does not exist in the mapping.
+ with self.assertRaises(ValueError) as err:
+ update_tryjob_status.UpdateTryjobStatus(
+ revision_to_update,
+ nonexistent_update_status,
+ temp_json_file,
+ custom_script,
+ )
+
+ self.assertEqual(
+ str(err.exception),
+ 'Invalid "set_status" option provided: revert_status',
+ )
+
+ mock_find_tryjob_index.assert_called_once()
+
+
+if __name__ == "__main__":
+ unittest.main()
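For context on the tests above: they exercise a small exit-code protocol
between update_tryjob_status.py and a user-supplied custom script. The script
is launched via `subprocess.Popen()`, its exit code is looked up in a
good/bad/skip mapping, and any other code raises ValueError pointing at a
renamed temp .JSON copy of the tryjob contents (hence the mocked `os.rename()`
and `os.path.basename()` above). A minimal sketch of that lookup, with
illustrative exit-code values; the authoritative ones live in
`CustomScriptStatus` in update_tryjob_status.py:

    import enum
    import subprocess


    class CustomScriptStatus(enum.Enum):
        # Illustrative values only; see update_tryjob_status.py.
        GOOD = 0
        BAD = 124
        SKIP = 125


    custom_script_exit_value_mapping = {
        CustomScriptStatus.GOOD.value: "good",
        CustomScriptStatus.BAD.value: "bad",
        CustomScriptStatus.SKIP.value: "skip",
    }


    def run_custom_script_sketch(custom_script_path: str) -> str:
        """Runs the custom script and maps its exit code to a status."""
        proc = subprocess.Popen(
            [custom_script_path],
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
            encoding="utf-8",
        )
        _, stderr = proc.communicate()
        if proc.returncode not in custom_script_exit_value_mapping:
            raise ValueError(
                "Unexpected custom script exit code %d: %s"
                % (proc.returncode, stderr)
            )
        return custom_script_exit_value_mapping[proc.returncode]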
diff --git a/llvm_tools/upload_lexan_crashes_to_forcey.py b/llvm_tools/upload_lexan_crashes_to_forcey.py
index 61bf6b7d..885a88f6 100755
--- a/llvm_tools/upload_lexan_crashes_to_forcey.py
+++ b/llvm_tools/upload_lexan_crashes_to_forcey.py
@@ -1,6 +1,6 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
-# Copyright 2020 The Chromium OS Authors. All rights reserved.
+# Copyright 2020 The ChromiumOS Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
@@ -16,142 +16,149 @@ import shutil
import subprocess
import sys
import tempfile
-from typing import Generator, List, Iterable
+from typing import Generator, Iterable, List, Tuple
-gsurl_base = 'gs://chrome-clang-crash-reports/v1'
+
+gsurl_base = "gs://chrome-clang-crash-reports/v1"
def gsutil_ls(loc: str) -> List[str]:
- results = subprocess.run(['gsutil.py', 'ls', loc],
- stdout=subprocess.PIPE,
- check=True,
- encoding='utf-8')
- return [l.strip() for l in results.stdout.splitlines()]
+ results = subprocess.run(
+ ["gsutil.py", "ls", loc],
+ stdout=subprocess.PIPE,
+ check=True,
+ encoding="utf-8",
+ )
+ return [l.strip() for l in results.stdout.splitlines()]
def gsurl_ls_last_numbers(url: str) -> List[int]:
- return sorted(int(x.rstrip('/').split('/')[-1]) for x in gsutil_ls(url))
+ return sorted(int(x.rstrip("/").split("/")[-1]) for x in gsutil_ls(url))
def get_available_year_numbers() -> List[int]:
- return gsurl_ls_last_numbers(gsurl_base)
+ return gsurl_ls_last_numbers(gsurl_base)
def get_available_month_numbers(year: int) -> List[int]:
- return gsurl_ls_last_numbers(f'{gsurl_base}/{year}')
+ return gsurl_ls_last_numbers(f"{gsurl_base}/{year}")
def get_available_day_numbers(year: int, month: int) -> List[int]:
- return gsurl_ls_last_numbers(f'{gsurl_base}/{year}/{month:02d}')
+ return gsurl_ls_last_numbers(f"{gsurl_base}/{year}/{month:02d}")
def get_available_test_case_urls(year: int, month: int, day: int) -> List[str]:
- return gsutil_ls(f'{gsurl_base}/{year}/{month:02d}/{day:02d}')
+ return gsutil_ls(f"{gsurl_base}/{year}/{month:02d}/{day:02d}")
-def test_cases_on_or_after(date: datetime.datetime
- ) -> Generator[str, None, None]:
- """Yields all test-cases submitted on or after the given date."""
- for year in get_available_year_numbers():
- if year < date.year:
- continue
+def test_cases_on_or_after(
+    date: datetime.date,
+) -> Generator[Tuple[datetime.date, List[str]], None, None]:
+    """Yields (date, test-case URLs) pairs for days on or after `date`."""
+ for year in get_available_year_numbers():
+ if year < date.year:
+ continue
- for month in get_available_month_numbers(year):
- if year == date.year and month < date.month:
- continue
+ for month in get_available_month_numbers(year):
+ if year == date.year and month < date.month:
+ continue
- for day in get_available_day_numbers(year, month):
- when = datetime.date(year, month, day)
- if when < date:
- continue
+ for day in get_available_day_numbers(year, month):
+ when = datetime.date(year, month, day)
+ if when < date:
+ continue
- yield when, get_available_test_case_urls(year, month, day)
+ yield when, get_available_test_case_urls(year, month, day)
def to_ymd(date: datetime.date) -> str:
- return date.strftime('%Y-%m-%d')
+ return date.strftime("%Y-%m-%d")
def from_ymd(date_str: str) -> datetime.date:
- return datetime.datetime.strptime(date_str, '%Y-%m-%d').date()
-
-
-def persist_state(seen_urls: Iterable[str], state_file: str,
- current_date: datetime.date):
- tmp_state_file = state_file + '.tmp'
- with open(tmp_state_file, 'w', encoding='utf-8') as f:
- json.dump(
- {
- 'already_seen': sorted(seen_urls),
- 'most_recent_date': to_ymd(current_date),
- },
- f,
- )
- os.rename(tmp_state_file, state_file)
+ return datetime.datetime.strptime(date_str, "%Y-%m-%d").date()
+
+
+def persist_state(
+ seen_urls: Iterable[str], state_file: str, current_date: datetime.date
+):
+ tmp_state_file = state_file + ".tmp"
+ with open(tmp_state_file, "w", encoding="utf-8") as f:
+ json.dump(
+ {
+ "already_seen": sorted(seen_urls),
+ "most_recent_date": to_ymd(current_date),
+ },
+ f,
+ )
+ os.rename(tmp_state_file, state_file)
@contextlib.contextmanager
def temp_dir() -> Generator[str, None, None]:
- loc = tempfile.mkdtemp('lexan-autosubmit')
- try:
- yield loc
- finally:
- shutil.rmtree(loc)
+ loc = tempfile.mkdtemp("lexan-autosubmit")
+ try:
+ yield loc
+ finally:
+ shutil.rmtree(loc)
def download_and_unpack_test_case(gs_url: str, tempdir: str) -> None:
- suffix = os.path.splitext(gs_url)[1]
- target_name = 'test_case' + suffix
- target = os.path.join(tempdir, target_name)
- subprocess.run(['gsutil.py', 'cp', gs_url, target], check=True)
- subprocess.run(['tar', 'xaf', target_name], check=True, cwd=tempdir)
- os.unlink(target)
+ suffix = os.path.splitext(gs_url)[1]
+ target_name = "test_case" + suffix
+ target = os.path.join(tempdir, target_name)
+ subprocess.run(["gsutil.py", "cp", gs_url, target], check=True)
+ subprocess.run(["tar", "xaf", target_name], check=True, cwd=tempdir)
+ os.unlink(target)
def submit_test_case(gs_url: str, cr_tool: str) -> None:
- logging.info('Submitting %s', gs_url)
- with temp_dir() as tempdir:
- download_and_unpack_test_case(gs_url, tempdir)
-
- # Sometimes (e.g., in
- # gs://chrome-clang-crash-reports/v1/2020/03/27/
- # chromium.clang-ToTiOS-12754-GTXToolKit-2bfcde.tgz)
- # we'll get `.crash` files. Unclear why, but let's filter them out anyway.
- repro_files = [
- os.path.join(tempdir, x)
- for x in os.listdir(tempdir)
- if not x.endswith('.crash')
- ]
- assert len(repro_files) == 2, repro_files
- if repro_files[0].endswith('.sh'):
- sh_file, src_file = repro_files
- assert not src_file.endswith('.sh'), repro_files
- else:
- src_file, sh_file = repro_files
- assert sh_file.endswith('.sh'), repro_files
-
- # Peephole: lexan got a crash upload with a way old clang. Ignore it.
- with open(sh_file, encoding='utf-8') as f:
- if 'Crash reproducer for clang version 9.0.0' in f.read():
- logging.warning('Skipping upload for %s; seems to be with an old clang',
- gs_url)
- return
-
- subprocess.run(
- [
- cr_tool,
- 'reduce',
- '-stream=false',
- '-wait=false',
- '-note',
- gs_url,
- '-sh_file',
- os.path.join(tempdir, sh_file),
- '-src_file',
- os.path.join(tempdir, src_file),
- ],
- check=True,
- )
+ logging.info("Submitting %s", gs_url)
+ with temp_dir() as tempdir:
+ download_and_unpack_test_case(gs_url, tempdir)
+
+ # Sometimes (e.g., in
+ # gs://chrome-clang-crash-reports/v1/2020/03/27/
+ # chromium.clang-ToTiOS-12754-GTXToolKit-2bfcde.tgz)
+ # we'll get `.crash` files. Unclear why, but let's filter them out anyway.
+ repro_files = [
+ os.path.join(tempdir, x)
+ for x in os.listdir(tempdir)
+ if not x.endswith(".crash")
+ ]
+ assert len(repro_files) == 2, repro_files
+ if repro_files[0].endswith(".sh"):
+ sh_file, src_file = repro_files
+ assert not src_file.endswith(".sh"), repro_files
+ else:
+ src_file, sh_file = repro_files
+ assert sh_file.endswith(".sh"), repro_files
+
+ # Peephole: lexan got a crash upload with a way old clang. Ignore it.
+ with open(sh_file, encoding="utf-8") as f:
+ if "Crash reproducer for clang version 9.0.0" in f.read():
+ logging.warning(
+ "Skipping upload for %s; seems to be with an old clang",
+ gs_url,
+ )
+ return
+
+ subprocess.run(
+ [
+ cr_tool,
+ "reduce",
+ "-stream=false",
+ "-wait=false",
+ "-note",
+ gs_url,
+ "-sh_file",
+ os.path.join(tempdir, sh_file),
+ "-src_file",
+ os.path.join(tempdir, src_file),
+ ],
+ check=True,
+ )
def submit_new_test_cases(
@@ -160,112 +167,119 @@ def submit_new_test_cases(
forcey: str,
state_file_path: str,
) -> None:
- """Submits new test-cases to forcey.
-
- This will persist state after each test-case is submitted.
-
- Args:
- last_seen_test_cases: test-cases which have been submitted already, and
- should be skipped if seen again.
- earliest_date_to_check: the earliest date we should consider test-cases
- from.
- forcey: path to the forcey binary.
- state_file_path: path to our state file.
- """
- # `all_test_cases_seen` is the union of all test-cases seen on this and prior
- # invocations. It guarantees, in all cases we care about, that we won't
- # submit the same test-case twice. `test_cases_seen_this_invocation` is
- # persisted as "all of the test-cases we've seen on this and prior
- # invocations" if we successfully submit _all_ test-cases.
- #
- # Since you can visualize the test-cases this script considers as a sliding
- # window that only moves forward, if we saw a test-case on a prior iteration
- # but no longer see it, we'll never see it again (since it fell out of our
- # sliding window by being too old). Hence, keeping it around is
- # pointless.
- #
- # We only persist this minimized set of test-cases if _everything_ succeeds,
- # since if something fails below, there's a chance that we haven't revisited
- # test-cases that we've already seen.
- all_test_cases_seen = set(last_seen_test_cases)
- test_cases_seen_this_invocation = []
- most_recent_date = earliest_date_to_check
- for date, candidates in test_cases_on_or_after(earliest_date_to_check):
- most_recent_date = max(most_recent_date, date)
-
- for url in candidates:
- test_cases_seen_this_invocation.append(url)
- if url in all_test_cases_seen:
- continue
-
- all_test_cases_seen.add(url)
- submit_test_case(url, forcey)
-
- # Persisting on each iteration of this loop isn't free, but it's the
- # easiest way to not resubmit test-cases, and it's good to keep in mind
- # that:
- # - the state file will be small (<12KB, since it only keeps a few days
- # worth of test-cases after the first run)
- # - in addition to this, we're downloading+unzipping+reuploading multiple
- # MB of test-case bytes.
- #
- # So comparatively, the overhead here probably isn't an issue.
- persist_state(all_test_cases_seen, state_file_path, most_recent_date)
-
- persist_state(test_cases_seen_this_invocation, state_file_path,
- most_recent_date)
+ """Submits new test-cases to forcey.
+
+ This will persist state after each test-case is submitted.
+
+ Args:
+ last_seen_test_cases: test-cases which have been submitted already, and
+ should be skipped if seen again.
+ earliest_date_to_check: the earliest date we should consider test-cases
+ from.
+ forcey: path to the forcey binary.
+ state_file_path: path to our state file.
+ """
+ # `all_test_cases_seen` is the union of all test-cases seen on this and prior
+ # invocations. It guarantees, in all cases we care about, that we won't
+ # submit the same test-case twice. `test_cases_seen_this_invocation` is
+ # persisted as "all of the test-cases we've seen on this and prior
+ # invocations" if we successfully submit _all_ test-cases.
+ #
+ # Since you can visualize the test-cases this script considers as a sliding
+ # window that only moves forward, if we saw a test-case on a prior iteration
+ # but no longer see it, we'll never see it again (since it fell out of our
+ # sliding window by being too old). Hence, keeping it around is
+ # pointless.
+ #
+ # We only persist this minimized set of test-cases if _everything_ succeeds,
+ # since if something fails below, there's a chance that we haven't revisited
+ # test-cases that we've already seen.
+ all_test_cases_seen = set(last_seen_test_cases)
+ test_cases_seen_this_invocation = []
+ most_recent_date = earliest_date_to_check
+ for date, candidates in test_cases_on_or_after(earliest_date_to_check):
+ most_recent_date = max(most_recent_date, date)
+
+ for url in candidates:
+ test_cases_seen_this_invocation.append(url)
+ if url in all_test_cases_seen:
+ continue
+
+ all_test_cases_seen.add(url)
+ submit_test_case(url, forcey)
+
+ # Persisting on each iteration of this loop isn't free, but it's the
+ # easiest way to not resubmit test-cases, and it's good to keep in mind
+ # that:
+ # - the state file will be small (<12KB, since it only keeps a few days
+ # worth of test-cases after the first run)
+ # - in addition to this, we're downloading+unzipping+reuploading multiple
+ # MB of test-case bytes.
+ #
+ # So comparatively, the overhead here probably isn't an issue.
+ persist_state(
+ all_test_cases_seen, state_file_path, most_recent_date
+ )
+
+ persist_state(
+ test_cases_seen_this_invocation, state_file_path, most_recent_date
+ )
def main(argv: List[str]):
- logging.basicConfig(
- format='>> %(asctime)s: %(levelname)s: %(filename)s:%(lineno)d: '
- '%(message)s',
- level=logging.INFO,
- )
-
- my_dir = os.path.dirname(os.path.abspath(__file__))
-
- parser = argparse.ArgumentParser(description=__doc__)
- parser.add_argument(
- '--state_file', default=os.path.join(my_dir, 'lexan-state.json'))
- parser.add_argument(
- '--last_date',
- help='The earliest date that we care about. All test cases from here '
- 'on will be picked up. Format is YYYY-MM-DD.')
- parser.add_argument(
- '--4c', dest='forcey', required=True, help='Path to a 4c client binary')
- opts = parser.parse_args(argv)
-
- forcey = opts.forcey
- state_file = opts.state_file
- last_date_str = opts.last_date
-
- os.makedirs(os.path.dirname(state_file), 0o755, exist_ok=True)
-
- if last_date_str is None:
- with open(state_file, encoding='utf-8') as f:
- data = json.load(f)
- most_recent_date = from_ymd(data['most_recent_date'])
- submit_new_test_cases(
- last_seen_test_cases=data['already_seen'],
- # Note that we always subtract one day from this to avoid a race:
- # uploads may appear slightly out-of-order (or builders may lag, or
- # ...), so the last test-case uploaded for 2020/01/01 might appear
- # _after_ the first test-case for 2020/01/02. Assuming that builders
- # won't lag behind for over a day, the easiest way to handle this is to
- # always check the previous and current days.
- earliest_date_to_check=most_recent_date - datetime.timedelta(days=1),
- forcey=forcey,
- state_file_path=state_file,
+ logging.basicConfig(
+ format=">> %(asctime)s: %(levelname)s: %(filename)s:%(lineno)d: "
+ "%(message)s",
+ level=logging.INFO,
)
- else:
- submit_new_test_cases(
- last_seen_test_cases=(),
- earliest_date_to_check=from_ymd(last_date_str),
- forcey=forcey,
- state_file_path=state_file,
+
+ my_dir = os.path.dirname(os.path.abspath(__file__))
+
+ parser = argparse.ArgumentParser(description=__doc__)
+ parser.add_argument(
+ "--state_file", default=os.path.join(my_dir, "lexan-state.json")
)
+ parser.add_argument(
+ "--last_date",
+ help="The earliest date that we care about. All test cases from here "
+ "on will be picked up. Format is YYYY-MM-DD.",
+ )
+ parser.add_argument(
+ "--4c", dest="forcey", required=True, help="Path to a 4c client binary"
+ )
+ opts = parser.parse_args(argv)
+
+ forcey = opts.forcey
+ state_file = opts.state_file
+ last_date_str = opts.last_date
+
+ os.makedirs(os.path.dirname(state_file), 0o755, exist_ok=True)
+
+ if last_date_str is None:
+ with open(state_file, encoding="utf-8") as f:
+ data = json.load(f)
+ most_recent_date = from_ymd(data["most_recent_date"])
+ submit_new_test_cases(
+ last_seen_test_cases=data["already_seen"],
+ # Note that we always subtract one day from this to avoid a race:
+ # uploads may appear slightly out-of-order (or builders may lag, or
+ # ...), so the last test-case uploaded for 2020/01/01 might appear
+ # _after_ the first test-case for 2020/01/02. Assuming that builders
+ # won't lag behind for over a day, the easiest way to handle this is to
+ # always check the previous and current days.
+ earliest_date_to_check=most_recent_date
+ - datetime.timedelta(days=1),
+ forcey=forcey,
+ state_file_path=state_file,
+ )
+ else:
+ submit_new_test_cases(
+ last_seen_test_cases=(),
+ earliest_date_to_check=from_ymd(last_date_str),
+ forcey=forcey,
+ state_file_path=state_file,
+ )
-if __name__ == '__main__':
- sys.exit(main(sys.argv[1:]))
+if __name__ == "__main__":
+ sys.exit(main(sys.argv[1:]))
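Usage note for the script above: persist_state() makes runs resumable by
writing the seen-URL set and most recent date to --state_file with a
write-to-temp-then-os.rename() step, so an interrupted run never leaves a
truncated state file. When --last_date is omitted, the next run resumes from
one day before the persisted date (the overlap main() uses to tolerate
out-of-order uploads); when --last_date is given, the state file is not read
and the scan is re-seeded. An illustrative first and follow-up invocation
(paths hypothetical):

    $ ./upload_lexan_crashes_to_forcey.py --4c /path/to/4c \
        --last_date 2020-01-01
    $ ./upload_lexan_crashes_to_forcey.py --4c /path/to/4c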
diff --git a/llvm_tools/upload_lexan_crashes_to_forcey_test.py b/llvm_tools/upload_lexan_crashes_to_forcey_test.py
index 937cbf8e..7238281a 100755
--- a/llvm_tools/upload_lexan_crashes_to_forcey_test.py
+++ b/llvm_tools/upload_lexan_crashes_to_forcey_test.py
@@ -1,6 +1,6 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
-# Copyright 2020 The Chromium OS Authors. All rights reserved.
+# Copyright 2020 The ChromiumOS Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
@@ -15,132 +15,152 @@ import upload_lexan_crashes_to_forcey
class Test(unittest.TestCase):
- """Tests for upload_lexan_crashes_to_forcey."""
-
- def test_date_parsing_functions(self):
- self.assertEqual(
- datetime.date(2020, 2, 1),
- upload_lexan_crashes_to_forcey.from_ymd('2020-02-01'))
-
- @unittest.mock.patch(
- 'upload_lexan_crashes_to_forcey.test_cases_on_or_after',
- return_value=(
- (
- datetime.date(2020, 1, 1),
- ('gs://test-case-1', 'gs://test-case-1.1'),
- ),
- (datetime.date(2020, 1, 2), ('gs://test-case-2',)),
- (datetime.date(2020, 1, 1), ('gs://test-case-3',)),
- (datetime.date(2020, 1, 4), ('gs://test-case-4',)),
- ))
- @unittest.mock.patch('upload_lexan_crashes_to_forcey.submit_test_case')
- @unittest.mock.patch('upload_lexan_crashes_to_forcey.persist_state')
- def test_new_test_case_submission_functions(self, persist_state_mock,
- submit_test_case_mock,
- test_cases_on_or_after_mock):
- forcey_path = '/path/to/4c'
- real_state_file_path = '/path/to/state/file'
- earliest_date = datetime.date(2020, 1, 1)
-
- persist_state_calls = []
-
- # Since the set this gets is mutated, we need to copy it somehow.
- def persist_state_side_effect(test_cases_to_persist, state_file_path,
- most_recent_date):
- self.assertEqual(state_file_path, real_state_file_path)
- persist_state_calls.append(
- (sorted(test_cases_to_persist), most_recent_date))
-
- persist_state_mock.side_effect = persist_state_side_effect
-
- upload_lexan_crashes_to_forcey.submit_new_test_cases(
- last_seen_test_cases=(
- 'gs://test-case-0',
- 'gs://test-case-1',
+ """Tests for upload_lexan_crashes_to_forcey."""
+
+ def test_date_parsing_functions(self):
+ self.assertEqual(
+ datetime.date(2020, 2, 1),
+ upload_lexan_crashes_to_forcey.from_ymd("2020-02-01"),
+ )
+
+ @unittest.mock.patch(
+ "upload_lexan_crashes_to_forcey.test_cases_on_or_after",
+ return_value=(
+ (
+ datetime.date(2020, 1, 1),
+ ("gs://test-case-1", "gs://test-case-1.1"),
+ ),
+ (datetime.date(2020, 1, 2), ("gs://test-case-2",)),
+ (datetime.date(2020, 1, 1), ("gs://test-case-3",)),
+ (datetime.date(2020, 1, 4), ("gs://test-case-4",)),
),
- earliest_date_to_check=earliest_date,
- forcey=forcey_path,
- state_file_path=real_state_file_path,
)
-
- test_cases_on_or_after_mock.assert_called_once_with(earliest_date)
- self.assertEqual(submit_test_case_mock.call_args_list, [
- unittest.mock.call('gs://test-case-1.1', forcey_path),
- unittest.mock.call('gs://test-case-2', forcey_path),
- unittest.mock.call('gs://test-case-3', forcey_path),
- unittest.mock.call('gs://test-case-4', forcey_path),
- ])
-
- self.assertEqual(persist_state_calls, [
- (
- ['gs://test-case-0', 'gs://test-case-1', 'gs://test-case-1.1'],
- datetime.date(2020, 1, 1),
- ),
- (
- [
- 'gs://test-case-0',
- 'gs://test-case-1',
- 'gs://test-case-1.1',
- 'gs://test-case-2',
- ],
- datetime.date(2020, 1, 2),
- ),
- (
- [
- 'gs://test-case-0',
- 'gs://test-case-1',
- 'gs://test-case-1.1',
- 'gs://test-case-2',
- 'gs://test-case-3',
- ],
- datetime.date(2020, 1, 2),
- ),
- (
+ @unittest.mock.patch("upload_lexan_crashes_to_forcey.submit_test_case")
+ @unittest.mock.patch("upload_lexan_crashes_to_forcey.persist_state")
+ def test_new_test_case_submission_functions(
+ self,
+ persist_state_mock,
+ submit_test_case_mock,
+ test_cases_on_or_after_mock,
+ ):
+ forcey_path = "/path/to/4c"
+ real_state_file_path = "/path/to/state/file"
+ earliest_date = datetime.date(2020, 1, 1)
+
+ persist_state_calls = []
+
+ # Since the set this gets is mutated, we need to copy it somehow.
+ def persist_state_side_effect(
+ test_cases_to_persist, state_file_path, most_recent_date
+ ):
+ self.assertEqual(state_file_path, real_state_file_path)
+ persist_state_calls.append(
+ (sorted(test_cases_to_persist), most_recent_date)
+ )
+
+ persist_state_mock.side_effect = persist_state_side_effect
+
+ upload_lexan_crashes_to_forcey.submit_new_test_cases(
+ last_seen_test_cases=(
+ "gs://test-case-0",
+ "gs://test-case-1",
+ ),
+ earliest_date_to_check=earliest_date,
+ forcey=forcey_path,
+ state_file_path=real_state_file_path,
+ )
+
+ test_cases_on_or_after_mock.assert_called_once_with(earliest_date)
+ self.assertEqual(
+ submit_test_case_mock.call_args_list,
[
- 'gs://test-case-0',
- 'gs://test-case-1',
- 'gs://test-case-1.1',
- 'gs://test-case-2',
- 'gs://test-case-3',
- 'gs://test-case-4',
+ unittest.mock.call("gs://test-case-1.1", forcey_path),
+ unittest.mock.call("gs://test-case-2", forcey_path),
+ unittest.mock.call("gs://test-case-3", forcey_path),
+ unittest.mock.call("gs://test-case-4", forcey_path),
],
- datetime.date(2020, 1, 4),
- ),
- (
+ )
+
+ self.assertEqual(
+ persist_state_calls,
[
- 'gs://test-case-1',
- 'gs://test-case-1.1',
- 'gs://test-case-2',
- 'gs://test-case-3',
- 'gs://test-case-4',
+ (
+ [
+ "gs://test-case-0",
+ "gs://test-case-1",
+ "gs://test-case-1.1",
+ ],
+ datetime.date(2020, 1, 1),
+ ),
+ (
+ [
+ "gs://test-case-0",
+ "gs://test-case-1",
+ "gs://test-case-1.1",
+ "gs://test-case-2",
+ ],
+ datetime.date(2020, 1, 2),
+ ),
+ (
+ [
+ "gs://test-case-0",
+ "gs://test-case-1",
+ "gs://test-case-1.1",
+ "gs://test-case-2",
+ "gs://test-case-3",
+ ],
+ datetime.date(2020, 1, 2),
+ ),
+ (
+ [
+ "gs://test-case-0",
+ "gs://test-case-1",
+ "gs://test-case-1.1",
+ "gs://test-case-2",
+ "gs://test-case-3",
+ "gs://test-case-4",
+ ],
+ datetime.date(2020, 1, 4),
+ ),
+ (
+ [
+ "gs://test-case-1",
+ "gs://test-case-1.1",
+ "gs://test-case-2",
+ "gs://test-case-3",
+ "gs://test-case-4",
+ ],
+ datetime.date(2020, 1, 4),
+ ),
],
- datetime.date(2020, 1, 4),
- ),
- ])
+ )
- @unittest.mock.patch(
- 'upload_lexan_crashes_to_forcey.download_and_unpack_test_case')
- @unittest.mock.patch('subprocess.run')
- def test_test_case_submission_functions(self, subprocess_run_mock,
- download_and_unpack_mock):
- mock_gs_url = 'gs://foo/bar/baz'
+ @unittest.mock.patch(
+ "upload_lexan_crashes_to_forcey.download_and_unpack_test_case"
+ )
+ @unittest.mock.patch("subprocess.run")
+ def test_test_case_submission_functions(
+ self, subprocess_run_mock, download_and_unpack_mock
+ ):
+ mock_gs_url = "gs://foo/bar/baz"
- def side_effect(gs_url: str, tempdir: str) -> None:
- self.assertEqual(gs_url, mock_gs_url)
+ def side_effect(gs_url: str, tempdir: str) -> None:
+ self.assertEqual(gs_url, mock_gs_url)
- with open(os.path.join(tempdir, 'test_case.c'), 'w') as f:
- # All we need is an empty file here.
- pass
+ with open(os.path.join(tempdir, "test_case.c"), "w") as f:
+ # All we need is an empty file here.
+ pass
- with open(
- os.path.join(tempdir, 'test_case.sh'), 'w', encoding='utf-8') as f:
- f.write('# Crash reproducer for clang version 9.0.0 (...)\n')
- f.write('clang something or other\n')
+ with open(
+ os.path.join(tempdir, "test_case.sh"), "w", encoding="utf-8"
+ ) as f:
+ f.write("# Crash reproducer for clang version 9.0.0 (...)\n")
+ f.write("clang something or other\n")
- download_and_unpack_mock.side_effect = side_effect
- upload_lexan_crashes_to_forcey.submit_test_case(mock_gs_url, '4c')
- subprocess_run_mock.assert_not_called()
+ download_and_unpack_mock.side_effect = side_effect
+ upload_lexan_crashes_to_forcey.submit_test_case(mock_gs_url, "4c")
+ subprocess_run_mock.assert_not_called()
-if __name__ == '__main__':
- unittest.main()
+if __name__ == "__main__":
+ unittest.main()
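The persist_state expectations in test_new_test_case_submission_functions
encode the sliding-window contract of submit_new_test_cases(): each mid-loop
persist is the union of the prior state and all URLs seen so far, while the
final persist keeps only URLs seen on this invocation, so gs://test-case-0
(which no longer appears in the window) ages out. A condensed, hypothetical
restatement of that contract:

    # Hypothetical data mirroring the mock values in the test above.
    previous_state = {"gs://test-case-0", "gs://test-case-1"}
    seen_this_invocation = [
        "gs://test-case-1",
        "gs://test-case-1.1",
        "gs://test-case-2",
        "gs://test-case-3",
        "gs://test-case-4",
    ]
    # While looping, the persisted set only ever grows.
    union_while_looping = sorted(
        previous_state | set(seen_this_invocation)
    )
    # The final persist drops URLs that fell out of the sliding window.
    final_state = sorted(seen_this_invocation)
    assert "gs://test-case-0" in union_while_looping
    assert "gs://test-case-0" not in final_state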