aboutsummaryrefslogtreecommitdiff
path: root/llvm_tools
diff options
context:
space:
mode:
Diffstat (limited to 'llvm_tools')
-rw-r--r--llvm_tools/README.md44
-rwxr-xr-xllvm_tools/bisect_clang_crashes.py139
-rwxr-xr-xllvm_tools/bisect_clang_crashes_unittest.py92
-rwxr-xr-xllvm_tools/cherrypick_cl.py34
-rwxr-xr-xllvm_tools/fetch_cros_sdk_rolls.py112
-rwxr-xr-xllvm_tools/get_llvm_hash.py46
-rwxr-xr-xllvm_tools/get_llvm_hash_unittest.py5
-rwxr-xr-xllvm_tools/git_llvm_rev.py14
-rwxr-xr-xllvm_tools/git_llvm_rev_test.py30
-rwxr-xr-xllvm_tools/llvm_bisection.py278
-rwxr-xr-xllvm_tools/llvm_bisection_unittest.py624
-rw-r--r--llvm_tools/llvm_project.py11
-rwxr-xr-xllvm_tools/nightly_revert_checker.py9
-rwxr-xr-xllvm_tools/upload_lexan_crashes_to_forcey.py258
-rwxr-xr-xllvm_tools/upload_lexan_crashes_to_forcey_test.py122
15 files changed, 1208 insertions, 610 deletions
diff --git a/llvm_tools/README.md b/llvm_tools/README.md
index 1c1862d8..783ec22d 100644
--- a/llvm_tools/README.md
+++ b/llvm_tools/README.md
@@ -278,6 +278,7 @@ $ ./auto_llvm_bisection.py --start_rev 369410 --end_rev 369420 \
--last_tested /abs/path/to/last_tested_file.json \
--extra_change_lists 513590 1394249 \
--options latest-toolchain nochromesdk \
+ --chroot_path /path/to/chromeos/chroot \
--builder eve-release-tryjob
```
@@ -488,7 +489,7 @@ these synthesized numbers and git SHAs. Usage should be straightforward:
6f635f90929da9545dd696071a829a1a42f84b30
~> ./git_llvm_rev.py --llvm_dir llvm-project-copy/ --sha 6f635f90929da9545dd696071a829a1a42f84b30
r380000
-~> ./git_llvm_rev.py --llvm_dir llvm-project-copy/ --sha origin/master
+~> ./git_llvm_rev.py --llvm_dir llvm-project-copy/ --sha origin/some-branch
r387778
```
@@ -550,3 +551,44 @@ PYTHONPATH=../ ./nightly_revert_checker.py \
--llvm_dir llvm-project-copy \
--chromeos_dir ../../../../
```
+
+### `bisect_clang_crashes.py`
+
+This script downloads clang crash diagnoses from
+gs://chromeos-toolchain-artifacts/clang-crash-diagnoses and sends them to 4c for
+bisection.
+
+Usage example:
+
+```
+$ ./bisect_clang_crashes.py --4c 4c-cli --state_file ./output/state.json
+```
+
+The above command downloads the artifacts of clang crash diagnoses and sends them
+to 4c server for bisection. The summary of submitted jobs will be saved in
+output/state.json under the current path. The output directory will be created
+automatically if it does not exist yet. To get more information of the submitted
+jobs, please refer to go/4c-cli.
+
+### `upload_lexan_crashes_to_forcey.py`
+
+This script downloads clang crash diagnoses from Lexan's bucket and sends them
+to 4c for bisection.
+
+Usage example:
+
+```
+$ ./upload_lexan_crashes_to_forcey.py --4c 4c-cli \
+ --state_file ./output/state.json
+```
+
+The above command downloads the artifacts of clang crash diagnoses and sends them
+to 4c server for bisection. The summary of submitted jobs will be saved in
+output/state.json under the current path. The output directory will be created
+automatically if it does not exist yet. To get more information of the submitted
+jobs, please refer to go/4c-cli.
+
+Note that it's recommended to 'seed' the state file with a most recent upload
+date. This can be done by running this tool *once* with a `--last_date` flag.
+This flag has the script override whatever's in the state file (if anything) and
+start submitting all crashes uploaded starting at the given day.
diff --git a/llvm_tools/bisect_clang_crashes.py b/llvm_tools/bisect_clang_crashes.py
new file mode 100755
index 00000000..e8ee2ab6
--- /dev/null
+++ b/llvm_tools/bisect_clang_crashes.py
@@ -0,0 +1,139 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+# Copyright 2020 The Chromium OS Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Fetches and submits the artifacts from Chrome OS toolchain's crash bucket.
+"""
+
+# pylint: disable=cros-logging-import
+
+import argparse
+import glob
+import json
+import logging
+import os
+import os.path
+import shutil
+import subprocess
+import sys
+
+import chroot
+
+
+def get_artifacts(pattern):
+ results = subprocess.check_output(['gsutil.py', 'ls', pattern],
+ stderr=subprocess.STDOUT,
+ encoding='utf-8')
+ return sorted(l.strip() for l in results.splitlines())
+
+
+def get_crash_reproducers(working_dir):
+ results = []
+ for src in [
+ f for f in glob.glob('%s/*.c*' % working_dir)
+ if f.split('.')[-1] in ['c', 'cc', 'cpp']
+ ]:
+ script = '.'.join(src.split('.')[:-1]) + '.sh'
+ if not os.path.exists(script):
+ logging.warning('could not find the matching script of %s', src)
+ else:
+ results.append((src, script))
+ return results
+
+
+def submit_crash_to_forcey(forcey: str, temporary_directory: str,
+ buildbucket_id: str, url: str) -> None:
+ dest_dir = os.path.join(temporary_directory, buildbucket_id)
+ dest_file = os.path.join(dest_dir, os.path.basename(url))
+ logging.info('Downloading and submitting %r...', url)
+ subprocess.check_output(['gsutil.py', 'cp', url, dest_file],
+ stderr=subprocess.STDOUT)
+ subprocess.check_output(['tar', '-xJf', dest_file], cwd=dest_dir)
+ for src, script in get_crash_reproducers(dest_dir):
+ subprocess.check_output([
+ forcey, 'reduce', '-wait=false', '-note',
+ '%s:%s' % (url, src), '-sh_file', script, '-src_file', src
+ ])
+
+
+def main(argv):
+ chroot.VerifyOutsideChroot()
+ logging.basicConfig(
+ format='%(asctime)s: %(levelname)s: %(filename)s:%(lineno)d: %(message)s',
+ level=logging.INFO,
+ )
+ cur_dir = os.path.dirname(os.path.abspath(__file__))
+ parser = argparse.ArgumentParser(description=__doc__)
+ parser.add_argument(
+ '--4c', dest='forcey', required=True, help='Path to a 4c client binary')
+ parser.add_argument(
+ '--state_file',
+ default=os.path.join(cur_dir, 'chromeos-state.json'),
+ help='The path to the state file.')
+ parser.add_argument(
+ '--nocleanup',
+ action='store_false',
+ dest='cleanup',
+ help='Keep temporary files created after the script finishes.')
+ opts = parser.parse_args(argv)
+
+ state_file = os.path.abspath(opts.state_file)
+ os.makedirs(os.path.dirname(state_file), exist_ok=True)
+ temporary_directory = '/tmp/bisect_clang_crashes'
+ os.makedirs(temporary_directory, exist_ok=True)
+ urls = get_artifacts('gs://chromeos-toolchain-artifacts/clang-crash-diagnoses'
+ '/**/*clang_crash_diagnoses.tar.xz')
+ logging.info('%d crash URLs found', len(urls))
+
+ visited = {}
+ if os.path.exists(state_file):
+ buildbucket_ids = {url.split('/')[-2] for url in urls}
+ with open(state_file, encoding='utf-8') as f:
+ data = json.load(f)
+ visited = {k: v for k, v in data.items() if k in buildbucket_ids}
+ logging.info('Successfully loaded %d previously-submitted crashes',
+ len(visited))
+
+ try:
+ for url in urls:
+ splits = url.split('/')
+ buildbucket_id = splits[-2]
+      # Skip the builds that have been processed
+ if buildbucket_id in visited:
+ continue
+ submit_crash_to_forcey(
+ forcey=opts.forcey,
+ temporary_directory=temporary_directory,
+ buildbucket_id=buildbucket_id,
+ url=url,
+ )
+ visited[buildbucket_id] = url
+
+ exception_in_flight = False
+ except:
+ exception_in_flight = True
+ raise
+ finally:
+ if exception_in_flight:
+ # This is best-effort. If the machine powers off or similar, we'll just
+ # resubmit the same crashes, which is suboptimal, but otherwise
+ # acceptable.
+ logging.error('Something went wrong; attempting to save our work...')
+ else:
+ logging.info('Persisting state...')
+
+ tmp_state_file = state_file + '.tmp'
+ with open(tmp_state_file, 'w', encoding='utf-8') as f:
+ json.dump(visited, f, indent=2)
+ os.rename(tmp_state_file, state_file)
+
+ logging.info('State successfully persisted')
+
+ if opts.cleanup:
+ shutil.rmtree(temporary_directory)
+
+
+if __name__ == '__main__':
+ sys.exit(main(sys.argv[1:]))
diff --git a/llvm_tools/bisect_clang_crashes_unittest.py b/llvm_tools/bisect_clang_crashes_unittest.py
new file mode 100755
index 00000000..c9143450
--- /dev/null
+++ b/llvm_tools/bisect_clang_crashes_unittest.py
@@ -0,0 +1,92 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+# Copyright 2020 The Chromium OS Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Tests for bisect_clang_crashes."""
+
+# pylint: disable=cros-logging-import
+import glob
+import logging
+import os.path
+import subprocess
+import unittest
+import unittest.mock as mock
+
+import bisect_clang_crashes
+
+
+class Test(unittest.TestCase):
+ """Tests for bisect_clang_crashes."""
+
+ class _SilencingFilter(object):
+ """Silences all log messages.
+
+ Also collects info about log messages that would've been emitted.
+ """
+
+ def __init__(self):
+ self.messages = []
+
+ def filter(self, record):
+ self.messages.append(record.getMessage())
+ return 0
+
+ @mock.patch.object(subprocess, 'check_output')
+ def test_get_artifacts(self, mock_gsutil_ls):
+ pattern = 'gs://chromeos-toolchain-artifacts/clang-crash-diagnoses/' \
+ '**/*clang_crash_diagnoses.tar.xz'
+ mock_gsutil_ls.return_value = 'artifact1\nartifact2\nartifact3'
+ results = bisect_clang_crashes.get_artifacts(pattern)
+ self.assertEqual(results, ['artifact1', 'artifact2', 'artifact3'])
+ mock_gsutil_ls.assert_called_once_with(['gsutil.py', 'ls', pattern],
+ stderr=subprocess.STDOUT,
+ encoding='utf-8')
+
+ @mock.patch.object(os.path, 'exists')
+ @mock.patch.object(glob, 'glob')
+ def test_get_crash_reproducers_succeed(self, mock_file_search,
+ mock_file_check):
+ working_dir = 'SomeDirectory'
+ mock_file_search.return_value = ['a.c', 'b.cpp', 'c.cc']
+ mock_file_check.side_effect = [True, True, True]
+ results = bisect_clang_crashes.get_crash_reproducers(working_dir)
+ mock_file_search.assert_called_once_with('%s/*.c*' % working_dir)
+ self.assertEqual(mock_file_check.call_count, 3)
+ self.assertEqual(mock_file_check.call_args_list[0], mock.call('a.sh'))
+ self.assertEqual(mock_file_check.call_args_list[1], mock.call('b.sh'))
+ self.assertEqual(mock_file_check.call_args_list[2], mock.call('c.sh'))
+ self.assertEqual(results, [('a.c', 'a.sh'), ('b.cpp', 'b.sh'),
+ ('c.cc', 'c.sh')])
+
+ @mock.patch.object(os.path, 'exists')
+ @mock.patch.object(glob, 'glob')
+ def test_get_crash_reproducers_no_matching_script(self, mock_file_search,
+ mock_file_check):
+
+ def silence_logging():
+ root = logging.getLogger()
+ filt = self._SilencingFilter()
+ root.addFilter(filt)
+ self.addCleanup(root.removeFilter, filt)
+ return filt
+
+ log_filter = silence_logging()
+ working_dir = 'SomeDirectory'
+ mock_file_search.return_value = ['a.c', 'b.cpp', 'c.cc']
+ mock_file_check.side_effect = [True, False, True]
+ results = bisect_clang_crashes.get_crash_reproducers(working_dir)
+ mock_file_search.assert_called_once_with('%s/*.c*' % working_dir)
+ self.assertEqual(mock_file_check.call_count, 3)
+ self.assertEqual(mock_file_check.call_args_list[0], mock.call('a.sh'))
+ self.assertEqual(mock_file_check.call_args_list[1], mock.call('b.sh'))
+ self.assertEqual(mock_file_check.call_args_list[2], mock.call('c.sh'))
+ self.assertEqual(results, [('a.c', 'a.sh'), ('c.cc', 'c.sh')])
+ self.assertTrue(
+ any('could not find the matching script of b.cpp' in x
+ for x in log_filter.messages), log_filter.messages)
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/llvm_tools/cherrypick_cl.py b/llvm_tools/cherrypick_cl.py
index 78330d19..9e306725 100755
--- a/llvm_tools/cherrypick_cl.py
+++ b/llvm_tools/cherrypick_cl.py
@@ -170,32 +170,34 @@ def main():
help='Automatically create a CL if specified')
args = parser.parse_args()
- llvm_config = git_llvm_rev.LLVMConfig(
- remote='origin', dir=get_llvm_hash.GetAndUpdateLLVMProjectInLLVMTools())
-
llvm_symlink = chroot.ConvertChrootPathsToAbsolutePaths(
args.chroot_path,
chroot.GetChrootEbuildPaths(args.chroot_path, ['sys-devel/llvm']))[0]
+ llvm_symlink_dir = os.path.dirname(llvm_symlink)
+
+ git_status = subprocess.check_output(['git', 'status', '-s'],
+ cwd=llvm_symlink_dir,
+ encoding='utf-8')
+ if git_status:
+ raise ValueError('Uncommited changes detected in %s' %
+ os.path.dirname(os.path.dirname(llvm_symlink_dir)))
+
start_sha = args.start_sha
if start_sha == 'llvm':
- start_sha = parse_ebuild_for_assignment(
- os.path.dirname(llvm_symlink), 'LLVM_HASH')
+ start_sha = parse_ebuild_for_assignment(llvm_symlink_dir, 'LLVM_HASH')
elif start_sha == 'llvm-next':
- start_sha = parse_ebuild_for_assignment(
- os.path.dirname(llvm_symlink), 'LLVM_NEXT_HASH')
+ start_sha = parse_ebuild_for_assignment(llvm_symlink_dir, 'LLVM_NEXT_HASH')
logging.info('Base llvm hash == %s', start_sha)
+ llvm_config = git_llvm_rev.LLVMConfig(
+ remote='origin', dir=get_llvm_hash.GetAndUpdateLLVMProjectInLLVMTools())
+
start_sha = resolve_llvm_ref(llvm_config.dir, start_sha)
start_rev = git_llvm_rev.translate_sha_to_rev(llvm_config, start_sha)
if args.create_cl:
branch = 'cherry-pick'
- symlink = os.path.dirname(
- chroot.GetChrootEbuildPaths(args.chroot_path, ['sys-devel/llvm'])[0])
- symlink = chroot.ConvertChrootPathsToAbsolutePaths(args.chroot_path,
- [symlink])[0]
- symlink_dir = os.path.dirname(symlink)
- git.CreateBranch(symlink_dir, branch)
+ git.CreateBranch(llvm_symlink_dir, branch)
symlinks_to_uprev = []
commit_messages = [
'llvm: cherry-pick CLs from upstream\n',
@@ -221,9 +223,9 @@ def main():
relative_patches_dir = 'cherry' if package == 'llvm' else ''
patches_dir = os.path.join(symlink_dir, 'files', relative_patches_dir)
logging.info('Cherrypicking %s (%s) into %s', rev, sha, package)
+
add_cherrypick(patches_json_path, patches_dir, relative_patches_dir,
start_rev, llvm_config.dir, rev, sha, package)
-
if args.create_cl:
symlinks_to_uprev.extend(symlinks)
commit_messages.extend([
@@ -240,8 +242,8 @@ def main():
for symlink in symlinks_to_uprev:
update_chromeos_llvm_hash.UprevEbuildSymlink(symlink)
subprocess.check_output(['git', 'add', '--all'], cwd=symlink_dir)
- git.UploadChanges(symlink_dir, branch, commit_messages)
- git.DeleteBranch(symlink_dir, branch)
+ git.UploadChanges(llvm_symlink_dir, branch, commit_messages)
+ git.DeleteBranch(llvm_symlink_dir, branch)
if __name__ == '__main__':
diff --git a/llvm_tools/fetch_cros_sdk_rolls.py b/llvm_tools/fetch_cros_sdk_rolls.py
new file mode 100755
index 00000000..42af678a
--- /dev/null
+++ b/llvm_tools/fetch_cros_sdk_rolls.py
@@ -0,0 +1,112 @@
+#!/usr/bin/env python3
+# Copyright 2020 The Chromium OS Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Gets info about completed chromiumos-sdk runs.
+
+Moreover, this script exists to get versions of published sdk tarballs in
+gs://chromiumos-sdk/. The hope is that it'll help answer the question "when did
+the toolchain ebuild ${x} go live?"
+"""
+
+# pylint: disable=cros-logging-import
+
+import argparse
+import json
+import logging
+import os
+import shutil
+import subprocess
+import sys
+import tempfile
+from typing import Dict, List
+from pathlib import Path
+
+
+def fetch_all_sdk_manifest_paths() -> List[str]:
+ """Fetches all paths of SDK manifests; newer = later in the return value."""
+ results = subprocess.run(
+ ['gsutil', 'ls', 'gs://chromiumos-sdk/cros-sdk-20??.*.Manifest'],
+ check=True,
+ stdout=subprocess.PIPE,
+ encoding='utf-8',
+ ).stdout
+ # These are named so that sorted order == newest last.
+ return sorted(x.strip() for x in results.splitlines())
+
+
+def fetch_manifests_into(into_dir: Path, manifests: List[str]):
+ # Wrap this in a `try` block because gsutil likes to print to stdout *and*
+ # stderr even on success, so we silence them & only print on failure.
+ try:
+ subprocess.run(
+ [
+ 'gsutil',
+ '-m',
+ 'cp',
+ '-I',
+ str(into_dir),
+ ],
+ check=True,
+ input='\n'.join(manifests),
+ stdout=subprocess.PIPE,
+ stderr=subprocess.STDOUT,
+ encoding='utf-8',
+ )
+ except subprocess.CalledProcessError as e:
+ logging.exception('gsutil failed; output:\n%s', e.stdout)
+
+
+def load_manifest_versions(manifest: Path) -> Dict[str, str]:
+ with manifest.open(encoding='utf-8') as f:
+ raw_versions = json.load(f)
+
+ # We get a dict of list of lists of versions and some other metadata, e.g.
+ # {"foo/bar": [["1.2.3", {}]]}
+ # Trim out the metadata.
+ return {k: v[0][0] for k, v in raw_versions['packages'].items()}
+
+
+def main():
+ parser = argparse.ArgumentParser(
+ description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter)
+ parser.add_argument(
+ '-d', '--debug', action='store_true', help='Emit debugging output')
+ parser.add_argument(
+ '-n',
+ '--number',
+ type=int,
+ default=20,
+ help='Number of recent manifests to fetch info about. 0 means unlimited.')
+ args = parser.parse_args()
+
+ is_debug = args.debug
+ logging.basicConfig(level=logging.DEBUG if is_debug else logging.INFO)
+
+ logging.debug('Fetching SDK manifests')
+ manifest_paths = fetch_all_sdk_manifest_paths()
+ logging.debug('%d SDK manifests fetched', len(manifest_paths))
+
+ number = args.number
+ if number:
+ manifest_paths = manifest_paths[-number:]
+
+ tempdir = Path(tempfile.mkdtemp(prefix='cros-sdk-rolls'))
+ try:
+ logging.debug('Working in tempdir %r', tempdir)
+ fetch_manifests_into(tempdir, manifest_paths)
+
+ for path in manifest_paths:
+ basename = os.path.basename(path)
+ versions = load_manifest_versions(tempdir.joinpath(basename))
+ print(f'{basename}: {versions["sys-devel/llvm"]}')
+ finally:
+ if is_debug:
+ logging.debug('Keeping around tempdir %r to aid debugging', tempdir)
+ else:
+ shutil.rmtree(tempdir)
+
+
+if __name__ == '__main__':
+ sys.exit(main())
diff --git a/llvm_tools/get_llvm_hash.py b/llvm_tools/get_llvm_hash.py
index a5b5429e..4c479962 100755
--- a/llvm_tools/get_llvm_hash.py
+++ b/llvm_tools/get_llvm_hash.py
@@ -17,7 +17,8 @@ import tempfile
from contextlib import contextmanager
import git_llvm_rev
-from subprocess_helpers import CheckCommand, check_output
+from subprocess_helpers import CheckCommand
+from subprocess_helpers import check_output
_LLVM_GIT_URL = ('https://chromium.googlesource.com/external/github.com/llvm'
'/llvm-project')
@@ -39,7 +40,7 @@ def GetVersionFrom(src_dir, git_hash):
version = git_llvm_rev.translate_sha_to_rev(
git_llvm_rev.LLVMConfig(remote='origin', dir=src_dir), git_hash)
# Note: branches aren't supported
- assert version.branch == 'master', version.branch
+ assert version.branch == git_llvm_rev.MAIN_BRANCH, version.branch
return version.number
@@ -59,7 +60,7 @@ def GetGitHashFrom(src_dir, version):
return git_llvm_rev.translate_rev_to_sha(
git_llvm_rev.LLVMConfig(remote='origin', dir=src_dir),
- git_llvm_rev.Rev(branch='master', number=version))
+ git_llvm_rev.Rev(branch=git_llvm_rev.MAIN_BRANCH, number=version))
@contextmanager
@@ -85,13 +86,10 @@ def CreateTempLLVMRepo(temp_dir):
"""
abs_path_to_llvm_project_dir = GetAndUpdateLLVMProjectInLLVMTools()
-
- add_worktree_cmd = [
+ CheckCommand([
'git', '-C', abs_path_to_llvm_project_dir, 'worktree', 'add', '--detach',
- temp_dir, 'master'
- ]
-
- CheckCommand(add_worktree_cmd)
+ temp_dir, git_llvm_rev.MAIN_BRANCH
+ ])
try:
yield temp_dir
@@ -117,7 +115,7 @@ def GetAndUpdateLLVMProjectInLLVMTools():
Raises:
ValueError: LLVM repo (in 'llvm-project-copy' dir.) has changes or failed to
- checkout to master or failed to fetch from chromium mirror of LLVM.
+ checkout to main or failed to fetch from chromium mirror of LLVM.
"""
abs_path_to_llvm_tools_dir = os.path.dirname(os.path.abspath(__file__))
@@ -127,7 +125,8 @@ def GetAndUpdateLLVMProjectInLLVMTools():
if not os.path.isdir(abs_path_to_llvm_project_dir):
print(
- 'Checking out LLVM from scratch. This could take a while...',
+ 'Checking out LLVM from scratch. This could take a while...\n'
+ '(This should only need to be done once, though.)',
file=sys.stderr)
os.mkdir(abs_path_to_llvm_project_dir)
@@ -143,15 +142,11 @@ def GetAndUpdateLLVMProjectInLLVMTools():
raise ValueError('LLVM repo in %s has changes, please remove.' %
abs_path_to_llvm_project_dir)
- checkout_to_master_cmd = [
- 'git', '-C', abs_path_to_llvm_project_dir, 'checkout', 'master'
- ]
-
- CheckCommand(checkout_to_master_cmd)
-
- update_master_cmd = ['git', '-C', abs_path_to_llvm_project_dir, 'pull']
-
- CheckCommand(update_master_cmd)
+ CheckCommand([
+ 'git', '-C', abs_path_to_llvm_project_dir, 'checkout',
+ git_llvm_rev.MAIN_BRANCH
+ ])
+ CheckCommand(['git', '-C', abs_path_to_llvm_project_dir, 'pull'])
return abs_path_to_llvm_project_dir
@@ -298,14 +293,9 @@ class LLVMHash(object):
def GetTopOfTrunkGitHash(self):
"""Gets the latest git hash from top of trunk of LLVM."""
- path_to_master_branch = 'refs/heads/master'
-
- llvm_tot_git_hash_cmd = [
- 'git', 'ls-remote', _LLVM_GIT_URL, path_to_master_branch
- ]
-
- llvm_tot_git_hash = check_output(llvm_tot_git_hash_cmd)
-
+ path_to_main_branch = 'refs/heads/master'
+ llvm_tot_git_hash = check_output(
+ ['git', 'ls-remote', _LLVM_GIT_URL, path_to_main_branch])
return llvm_tot_git_hash.rstrip().split()[0]
diff --git a/llvm_tools/get_llvm_hash_unittest.py b/llvm_tools/get_llvm_hash_unittest.py
index c828f433..2e56aed5 100755
--- a/llvm_tools/get_llvm_hash_unittest.py
+++ b/llvm_tools/get_llvm_hash_unittest.py
@@ -8,12 +8,11 @@
from __future__ import print_function
-import get_llvm_hash
import subprocess
import unittest
import unittest.mock as mock
-import test_helpers
+import get_llvm_hash
from get_llvm_hash import LLVMHash
# We grab protected stuff from get_llvm_hash. That's OK.
@@ -87,7 +86,7 @@ class TestGetLLVMHash(unittest.TestCase):
@mock.patch.object(subprocess, 'check_output')
def testSuccessfullyGetGitHashFromToTOfLLVM(self, mock_check_output):
- mock_check_output.return_value = 'a123testhash1 path/to/master\n'
+ mock_check_output.return_value = 'a123testhash1 path/to/main\n'
self.assertEqual(LLVMHash().GetTopOfTrunkGitHash(), 'a123testhash1')
mock_check_output.assert_called_once()
diff --git a/llvm_tools/git_llvm_rev.py b/llvm_tools/git_llvm_rev.py
index 8eefcdce..07209f1e 100755
--- a/llvm_tools/git_llvm_rev.py
+++ b/llvm_tools/git_llvm_rev.py
@@ -18,6 +18,8 @@ import subprocess
import sys
import typing as t
+MAIN_BRANCH = 'master'
+
# Note that after base_llvm_sha, we reach The Wild West(TM) of commits.
# So reasonable input that could break us includes:
#
@@ -52,9 +54,9 @@ class Rev(t.NamedTuple('Rev', (('branch', str), ('number', int)))):
# pairs.
#
# We support r${commits_since_base_commit} as shorthand for
- # (master, r${commits_since_base_commit}).
+ # (main, r${commits_since_base_commit}).
if rev.startswith('r'):
- branch_name = 'master'
+ branch_name = MAIN_BRANCH
rev_string = rev[1:]
else:
match = re.match(r'\((.+), r(\d+)\)', rev)
@@ -67,7 +69,7 @@ class Rev(t.NamedTuple('Rev', (('branch', str), ('number', int)))):
def __str__(self) -> str:
branch_name, number = self
- if branch_name == 'master':
+ if branch_name == MAIN_BRANCH:
return 'r%d' % number
return '(%s, r%d)' % (branch_name, number)
@@ -141,7 +143,7 @@ def translate_sha_to_rev(llvm_config: LLVMConfig, sha_or_ref: str) -> Rev:
cwd=llvm_config.dir,
)
count = int(result.strip())
- return Rev(branch='master', number=count + base_llvm_revision)
+ return Rev(branch=MAIN_BRANCH, number=count + base_llvm_revision)
# Otherwise, either:
# - |merge_base| is |sha| (we have a guaranteed llvm-svn number on |sha|)
@@ -150,7 +152,7 @@ def translate_sha_to_rev(llvm_config: LLVMConfig, sha_or_ref: str) -> Rev:
merge_base_number = translate_prebase_sha_to_rev_number(
llvm_config, merge_base)
if merge_base == sha:
- return Rev(branch='master', number=merge_base_number)
+ return Rev(branch=MAIN_BRANCH, number=merge_base_number)
distance_from_base = check_output(
[
@@ -270,7 +272,7 @@ def translate_rev_to_sha(llvm_config: LLVMConfig, rev: Rev) -> str:
"""
branch, number = rev
- if branch == 'master':
+ if branch == MAIN_BRANCH:
if number < base_llvm_revision:
return translate_prebase_rev_to_sha(llvm_config, rev)
base_sha = base_llvm_sha
diff --git a/llvm_tools/git_llvm_rev_test.py b/llvm_tools/git_llvm_rev_test.py
index 1e38f589..74280c5d 100755
--- a/llvm_tools/git_llvm_rev_test.py
+++ b/llvm_tools/git_llvm_rev_test.py
@@ -6,12 +6,11 @@
"""Tests for git_llvm_rev."""
-from __future__ import print_function
-
import unittest
import git_llvm_rev
import llvm_project
+from git_llvm_rev import MAIN_BRANCH
def get_llvm_config() -> git_llvm_rev.LLVMConfig:
@@ -32,17 +31,17 @@ class Test(unittest.TestCase):
def test_sha_to_rev_on_base_sha_works(self) -> None:
sha = self.rev_to_sha_with_round_trip(
git_llvm_rev.Rev(
- branch='master', number=git_llvm_rev.base_llvm_revision))
+ branch=MAIN_BRANCH, number=git_llvm_rev.base_llvm_revision))
self.assertEqual(sha, git_llvm_rev.base_llvm_sha)
def test_sha_to_rev_prior_to_base_rev_works(self) -> None:
sha = self.rev_to_sha_with_round_trip(
- git_llvm_rev.Rev(branch='master', number=375000))
+ git_llvm_rev.Rev(branch=MAIN_BRANCH, number=375000))
self.assertEqual(sha, '2f6da767f13b8fd81f840c211d405fea32ac9db7')
def test_sha_to_rev_after_base_rev_works(self) -> None:
sha = self.rev_to_sha_with_round_trip(
- git_llvm_rev.Rev(branch='master', number=375506))
+ git_llvm_rev.Rev(branch=MAIN_BRANCH, number=375506))
self.assertEqual(sha, '3bf7fddeb05655d9baed4cc69e13535c677ed1dd')
def test_llvm_svn_parsing_runs_ignore_reverts(self) -> None:
@@ -51,18 +50,19 @@ class Test(unittest.TestCase):
# Commit which performed the revert
sha = self.rev_to_sha_with_round_trip(
- git_llvm_rev.Rev(branch='master', number=374895))
+ git_llvm_rev.Rev(branch=MAIN_BRANCH, number=374895))
self.assertEqual(sha, '1731fc88d1fa1fa55edd056db73a339b415dd5d6')
# Commit that was reverted
sha = self.rev_to_sha_with_round_trip(
- git_llvm_rev.Rev(branch='master', number=374841))
+ git_llvm_rev.Rev(branch=MAIN_BRANCH, number=374841))
self.assertEqual(sha, '2a1386c81de504b5bda44fbecf3f7b4cdfd748fc')
def test_imaginary_revs_raise(self) -> None:
with self.assertRaises(ValueError) as r:
git_llvm_rev.translate_rev_to_sha(
- get_llvm_config(), git_llvm_rev.Rev(branch='master', number=9999999))
+ get_llvm_config(),
+ git_llvm_rev.Rev(branch=MAIN_BRANCH, number=9999999))
self.assertIn('Try updating your tree?', str(r.exception))
@@ -71,15 +71,15 @@ class Test(unittest.TestCase):
# properties about it.
merge_sha_rev_number = 4496 + git_llvm_rev.base_llvm_revision
sha = self.rev_to_sha_with_round_trip(
- git_llvm_rev.Rev(branch='master', number=merge_sha_rev_number))
+ git_llvm_rev.Rev(branch=MAIN_BRANCH, number=merge_sha_rev_number))
self.assertEqual(sha, '0f0d0ed1c78f1a80139a1f2133fad5284691a121')
sha = self.rev_to_sha_with_round_trip(
- git_llvm_rev.Rev(branch='master', number=merge_sha_rev_number - 1))
+ git_llvm_rev.Rev(branch=MAIN_BRANCH, number=merge_sha_rev_number - 1))
self.assertEqual(sha, '6f635f90929da9545dd696071a829a1a42f84b30')
sha = self.rev_to_sha_with_round_trip(
- git_llvm_rev.Rev(branch='master', number=merge_sha_rev_number + 1))
+ git_llvm_rev.Rev(branch=MAIN_BRANCH, number=merge_sha_rev_number + 1))
self.assertEqual(sha, '199700a5cfeedf227619f966aa3125cef18bc958')
# NOTE: The below tests have _zz_ in their name as an optimization. Iterating
@@ -101,11 +101,11 @@ class Test(unittest.TestCase):
backing_sha = 'c89a3d78f43d81b9cff7b9248772ddf14d21b749'
sha = self.rev_to_sha_with_round_trip(
- git_llvm_rev.Rev(branch='master', number=rev_number))
+ git_llvm_rev.Rev(branch=MAIN_BRANCH, number=rev_number))
self.assertEqual(sha, backing_sha)
- # Note that this won't round-trip: since this commit is on the master
- # branch, we'll pick master for this. That's fine
+ # Note that this won't round-trip: since this commit is on the main
+ # branch, we'll pick main for this. That's fine.
sha = git_llvm_rev.translate_rev_to_sha(
get_llvm_config(),
git_llvm_rev.Rev(branch='release/9.x', number=rev_number))
@@ -113,7 +113,7 @@ class Test(unittest.TestCase):
def test_zz_branch_revs_work_after_merge_points(self) -> None:
# Picking the commit on the 9.x branch after the merge-base for that +
- # master. Note that this is where llvm-svn numbers should diverge from
+ # main. Note that this is where llvm-svn numbers should diverge from
# ours, and are therefore untrustworthy. The commit for this *does* have a
# different `llvm-svn:` string than we should have.
sha = self.rev_to_sha_with_round_trip(
diff --git a/llvm_tools/llvm_bisection.py b/llvm_tools/llvm_bisection.py
index 2772ca48..c8d694cd 100755
--- a/llvm_tools/llvm_bisection.py
+++ b/llvm_tools/llvm_bisection.py
@@ -13,10 +13,12 @@ import enum
import errno
import json
import os
+import subprocess
import sys
import chroot
import get_llvm_hash
+import git_llvm_rev
import modify_a_tryjob
import update_tryjob_status
@@ -28,11 +30,6 @@ class BisectionExitStatus(enum.Enum):
BISECTION_COMPLETE = 126
-def is_file_and_json(json_file):
- """Validates that the file exists and is a JSON file."""
- return os.path.isfile(json_file) and json_file.endswith('.json')
-
-
def GetCommandLineArgs():
"""Parses the command line for the command line arguments."""
@@ -121,30 +118,27 @@ def GetCommandLineArgs():
help='display contents of a command to the terminal '
'(default: %(default)s)')
+  # Add argument for abandoning the CLs created during bisection at cleanup.
+ parser.add_argument(
+ '--nocleanup',
+ action='store_false',
+ dest='cleanup',
+ help='Abandon CLs created for bisectoin')
+
args_output = parser.parse_args()
assert args_output.start_rev < args_output.end_rev, (
- 'Start revision %d is >= end revision %d' % (args_output.start_rev,
- args_output.end_rev))
+ 'Start revision %d is >= end revision %d' %
+ (args_output.start_rev, args_output.end_rev))
if args_output.last_tested and not args_output.last_tested.endswith('.json'):
- raise ValueError(
- 'Filed provided %s does not end in ".json"' % args_output.last_tested)
+ raise ValueError('Filed provided %s does not end in ".json"' %
+ args_output.last_tested)
return args_output
-def _ValidateStartAndEndAgainstJSONStartAndEnd(start, end, json_start,
- json_end):
- """Valides that the command line arguments are the same as the JSON."""
-
- if start != json_start or end != json_end:
- raise ValueError('The start %d or the end %d version provided is '
- 'different than "start" %d or "end" %d in the .JSON '
- 'file' % (start, end, json_start, json_end))
-
-
-def GetStartAndEndRevision(start, end, tryjobs):
+def GetRemainingRange(start, end, tryjobs):
"""Gets the start and end intervals in 'json_file'.
Args:
@@ -230,145 +224,56 @@ def GetStartAndEndRevision(start, end, tryjobs):
return good_rev, bad_rev, pending_revisions, skip_revisions
-def GetRevisionsBetweenBisection(start, end, parallel, src_path,
- pending_revisions, skip_revisions):
- """Gets the revisions between 'start' and 'end'.
-
- Sometimes, the LLVM source tree's revisions do not increment by 1 (there is
- a jump), so need to construct a list of all revisions that are NOT missing
- between 'start' and 'end'. Then, the step amount (i.e. length of the list
- divided by ('parallel' + 1)) will be used for indexing into the list.
-
- Args:
- start: The start revision.
- end: The end revision.
- parallel: The number of tryjobs to create between 'start' and 'end'.
- src_path: The absolute path to the LLVM source tree to use.
- pending_revisions: A set containing 'pending' revisions that are between
- 'start' and 'end'.
- skip_revisions: A set containing revisions between 'start' and 'end' that
- are to be skipped.
-
- Returns:
- A list of revisions between 'start' and 'end'.
- """
-
- valid_revisions = []
-
- # Start at ('start' + 1) because 'start' is the good revision.
- #
- # FIXME: Searching for each revision from ('start' + 1) up to 'end' in the
- # LLVM source tree is a quadratic algorithm. It's a good idea to optimize
- # this.
- for cur_revision in range(start + 1, end):
- try:
- if cur_revision not in pending_revisions and \
- cur_revision not in skip_revisions:
- # Verify that the current revision exists by finding its corresponding
- # git hash in the LLVM source tree.
- get_llvm_hash.GetGitHashFrom(src_path, cur_revision)
- valid_revisions.append(cur_revision)
- except ValueError:
- # Could not find the git hash for the current revision.
- continue
-
- # ('parallel' + 1) so that the last revision in the list is not close to
- # 'end' (have a bit more coverage).
- index_step = len(valid_revisions) // (parallel + 1)
-
- if not index_step:
- index_step = 1
-
- result = [valid_revisions[index] \
- for index in range(0, len(valid_revisions), index_step)]
-
- return result
-
-
-def GetRevisionsListAndHashList(start, end, parallel, src_path,
- pending_revisions, skip_revisions):
+def GetCommitsBetween(start, end, parallel, src_path, pending_revisions,
+ skip_revisions):
"""Determines the revisions between start and end."""
- new_llvm = get_llvm_hash.LLVMHash()
+ with get_llvm_hash.LLVMHash().CreateTempDirectory() as temp_dir:
+ # We have guaranteed contiguous revision numbers after this,
+    # and that guarantee simplifies things considerably, so we don't
+ # support anything before it.
+ assert start >= git_llvm_rev.base_llvm_revision, f'{start} was too long ago'
- with new_llvm.CreateTempDirectory() as temp_dir:
with get_llvm_hash.CreateTempLLVMRepo(temp_dir) as new_repo:
if not src_path:
src_path = new_repo
-
- # Get a list of revisions between start and end.
- revisions = GetRevisionsBetweenBisection(
- start, end, parallel, src_path, pending_revisions, skip_revisions)
-
+ index_step = (end - (start + 1)) // (parallel + 1)
+ if not index_step:
+ index_step = 1
+ revisions = [
+ rev for rev in range(start + 1, end, index_step)
+ if rev not in pending_revisions and rev not in skip_revisions
+ ]
git_hashes = [
get_llvm_hash.GetGitHashFrom(src_path, rev) for rev in revisions
]
+ return revisions, git_hashes
- return revisions, git_hashes
-
-
-def DieWithNoRevisionsError(start, end, skip_revisions, pending_revisions):
- """Raises a ValueError exception with useful information."""
-
- no_revisions_message = ('No revisions between start %d and end '
- '%d to create tryjobs' % (start, end))
- if pending_revisions:
- no_revisions_message += '\nThe following tryjobs are pending:\n' \
- + '\n'.join(str(rev) for rev in pending_revisions)
-
- if skip_revisions:
- no_revisions_message += '\nThe following tryjobs were skipped:\n' \
- + '\n'.join(str(rev) for rev in skip_revisions)
-
- raise ValueError(no_revisions_message)
-
-
-def CheckForExistingTryjobsInRevisionsToLaunch(revisions, jobs):
- """Checks if a revision in 'revisions' exists in 'jobs' list."""
-
- for rev in revisions:
- if update_tryjob_status.FindTryjobIndex(rev, jobs) is not None:
- raise ValueError('Revision %d exists already in "jobs"' % rev)
-
-
-def UpdateBisection(revisions, git_hashes, bisect_contents, last_tested,
- update_packages, chroot_path, patch_metadata_file,
- extra_change_lists, options, builder, verbose):
+def Bisect(revisions, git_hashes, bisect_state, last_tested, update_packages,
+ chroot_path, patch_metadata_file, extra_change_lists, options,
+ builder, verbose):
"""Adds tryjobs and updates the status file with the new tryjobs."""
try:
for svn_revision, git_hash in zip(revisions, git_hashes):
- tryjob_dict = modify_a_tryjob.AddTryjob(
- update_packages, git_hash, svn_revision, chroot_path,
- patch_metadata_file, extra_change_lists, options, builder, verbose,
- svn_revision)
+ tryjob_dict = modify_a_tryjob.AddTryjob(update_packages, git_hash,
+ svn_revision, chroot_path,
+ patch_metadata_file,
+ extra_change_lists, options,
+ builder, verbose, svn_revision)
- bisect_contents['jobs'].append(tryjob_dict)
+ bisect_state['jobs'].append(tryjob_dict)
finally:
# Do not want to lose progress if there is an exception.
if last_tested:
new_file = '%s.new' % last_tested
with open(new_file, 'w') as json_file:
- json.dump(bisect_contents, json_file, indent=4, separators=(',', ': '))
+ json.dump(bisect_state, json_file, indent=4, separators=(',', ': '))
os.rename(new_file, last_tested)
-def _NoteCompletedBisection(last_tested, src_path, end):
- """Prints that bisection is complete."""
-
- print('Finished bisecting for %s' % last_tested)
-
- if src_path:
- bad_llvm_hash = get_llvm_hash.GetGitHashFrom(src_path, end)
- else:
- bad_llvm_hash = get_llvm_hash.LLVMHash().GetLLVMHash(end)
-
- print(
- 'The bad revision is %d and its commit hash is %s' % (end, bad_llvm_hash))
-
-
def LoadStatusFile(last_tested, start, end):
"""Loads the status file for bisection."""
@@ -383,37 +288,36 @@ def LoadStatusFile(last_tested, start, end):
def main(args_output):
- """Bisects LLVM based off of a .JSON file.
+ """Bisects LLVM commits.
Raises:
AssertionError: The script was run inside the chroot.
"""
chroot.VerifyOutsideChroot()
-
update_packages = [
'sys-devel/llvm', 'sys-libs/compiler-rt', 'sys-libs/libcxx',
'sys-libs/libcxxabi', 'sys-libs/llvm-libunwind'
]
-
patch_metadata_file = 'PATCHES.json'
-
start = args_output.start_rev
end = args_output.end_rev
- bisect_contents = LoadStatusFile(args_output.last_tested, start, end)
-
- _ValidateStartAndEndAgainstJSONStartAndEnd(
- start, end, bisect_contents['start'], bisect_contents['end'])
+ bisect_state = LoadStatusFile(args_output.last_tested, start, end)
+ if start != bisect_state['start'] or end != bisect_state['end']:
+ raise ValueError(f'The start {start} or the end {end} version provided is '
+ f'different than "start" {bisect_state["start"]} or "end" '
+ f'{bisect_state["end"]} in the .JSON file')
# Pending and skipped revisions are between 'start_revision' and
# 'end_revision'.
start_revision, end_revision, pending_revisions, skip_revisions = \
- GetStartAndEndRevision(start, end, bisect_contents['jobs'])
+ GetRemainingRange(start, end, bisect_state['jobs'])
- revisions, git_hashes = GetRevisionsListAndHashList(
- start_revision, end_revision, args_output.parallel, args_output.src_path,
- pending_revisions, skip_revisions)
+ revisions, git_hashes = GetCommitsBetween(start_revision, end_revision,
+ args_output.parallel,
+ args_output.src_path,
+ pending_revisions, skip_revisions)
# No more revisions between 'start_revision' and 'end_revision', so
# bisection is complete.
@@ -421,39 +325,61 @@ def main(args_output):
# This is determined by finding all valid revisions between 'start_revision'
# and 'end_revision' and that are NOT in the 'pending' and 'skipped' set.
if not revisions:
- # Successfully completed bisection where there are 2 cases:
- # 1) 'start_revision' and 'end_revision' are back-to-back (example:
- # 'start_revision' is 369410 and 'end_revision' is 369411).
- #
- # 2) 'start_revision' and 'end_revision' are NOT back-to-back, so there must
- # be tryjobs in between which are labeled as 'skip' for their 'status'
- # value.
- #
- # In either case, there are no 'pending' jobs.
- if not pending_revisions:
- _NoteCompletedBisection(args_output.last_tested, args_output.src_path,
- end_revision)
+ if pending_revisions:
+ # Some tryjobs are not finished which may change the actual bad
+ # commit/revision when those tryjobs are finished.
+ no_revisions_message = (f'No revisions between start {start_revision} '
+ f'and end {end_revision} to create tryjobs\n')
- if skip_revisions:
- skip_revisions_message = ('\nThe following revisions were skipped:\n' +
- '\n'.join(str(rev) for rev in skip_revisions))
+ if pending_revisions:
+ no_revisions_message += (
+ 'The following tryjobs are pending:\n' +
+ '\n'.join(str(rev) for rev in pending_revisions) + '\n')
- print(skip_revisions_message)
-
- return BisectionExitStatus.BISECTION_COMPLETE.value
-
- # Some tryjobs are not finished which may change the actual bad
- # commit/revision when those tryjobs are finished.
- DieWithNoRevisionsError(start_revision, end_revision, skip_revisions,
- pending_revisions)
-
- CheckForExistingTryjobsInRevisionsToLaunch(revisions, bisect_contents['jobs'])
+ if skip_revisions:
+ no_revisions_message += ('The following tryjobs were skipped:\n' +
+ '\n'.join(str(rev) for rev in skip_revisions) +
+ '\n')
+
+ raise ValueError(no_revisions_message)
+
+ print(f'Finished bisecting for {args_output.last_tested}')
+ if args_output.src_path:
+ bad_llvm_hash = get_llvm_hash.GetGitHashFrom(args_output.src_path,
+ end_revision)
+ else:
+ bad_llvm_hash = get_llvm_hash.LLVMHash().GetLLVMHash(end_revision)
+ print(f'The bad revision is {end_revision} and its commit hash is '
+ f'{bad_llvm_hash}')
+ if skip_revisions:
+ skip_revisions_message = ('\nThe following revisions were skipped:\n' +
+ '\n'.join(str(rev) for rev in skip_revisions))
+ print(skip_revisions_message)
+
+ if args_output.cleanup:
+    # Abandon all the CLs created for bisection.
+ gerrit = os.path.join(args_output.chroot_path, 'chromite/bin/gerrit')
+ for build in bisect_state['jobs']:
+ try:
+ subprocess.check_output([gerrit, 'abandon', build['cl']],
+ stderr=subprocess.STDOUT,
+ encoding='utf-8')
+ except subprocess.CalledProcessError as err:
+        # The CL may already have been abandoned; ignore the resulting error.
+ if 'chromite.lib.gob_util.GOBError' not in err.output:
+ raise
+
+ return BisectionExitStatus.BISECTION_COMPLETE.value
- UpdateBisection(revisions, git_hashes, bisect_contents,
- args_output.last_tested, update_packages,
- args_output.chroot_path, patch_metadata_file,
- args_output.extra_change_lists, args_output.options,
- args_output.builder, args_output.verbose)
+ for rev in revisions:
+ if update_tryjob_status.FindTryjobIndex(rev,
+ bisect_state['jobs']) is not None:
+ raise ValueError(f'Revision {rev} exists already in "jobs"')
+
+ Bisect(revisions, git_hashes, bisect_state, args_output.last_tested,
+ update_packages, args_output.chroot_path, patch_metadata_file,
+ args_output.extra_change_lists, args_output.options,
+ args_output.builder, args_output.verbose)
if __name__ == '__main__':
diff --git a/llvm_tools/llvm_bisection_unittest.py b/llvm_tools/llvm_bisection_unittest.py
index e730293b..a40770a5 100755
--- a/llvm_tools/llvm_bisection_unittest.py
+++ b/llvm_tools/llvm_bisection_unittest.py
@@ -11,50 +11,57 @@
from __future__ import print_function
import json
+import os
+import subprocess
import unittest
import unittest.mock as mock
import chroot
import get_llvm_hash
+import git_llvm_rev
import llvm_bisection
import modify_a_tryjob
import test_helpers
-import update_tryjob_status
class LLVMBisectionTest(unittest.TestCase):
"""Unittests for LLVM bisection."""
- def testStartAndEndDoNotMatchJsonStartAndEnd(self):
+ def testGetRemainingRangePassed(self):
start = 100
end = 150
- json_start = 110
- json_end = 150
-
- # Verify the exception is raised when the start and end revision for LLVM
- # bisection do not match the .JSON's 'start' and 'end' values.
- with self.assertRaises(ValueError) as err:
- llvm_bisection._ValidateStartAndEndAgainstJSONStartAndEnd(
- start, end, json_start, json_end)
-
- expected_error_message = ('The start %d or the end %d version provided is '
- 'different than "start" %d or "end" %d in the '
- '.JSON file' % (start, end, json_start, json_end))
-
- self.assertEqual(str(err.exception), expected_error_message)
-
- def testStartAndEndMatchJsonStartAndEnd(self):
- start = 100
- end = 150
+ test_tryjobs = [{
+ 'rev': 110,
+ 'status': 'good',
+ 'link': 'https://some_tryjob_1_url.com'
+ }, {
+ 'rev': 120,
+ 'status': 'good',
+ 'link': 'https://some_tryjob_2_url.com'
+ }, {
+ 'rev': 130,
+ 'status': 'pending',
+ 'link': 'https://some_tryjob_3_url.com'
+ }, {
+ 'rev': 135,
+ 'status': 'skip',
+ 'link': 'https://some_tryjob_4_url.com'
+ }, {
+ 'rev': 140,
+ 'status': 'bad',
+ 'link': 'https://some_tryjob_5_url.com'
+ }]
- json_start = 100
- json_end = 150
+ # Tuple consists of the new good revision, the new bad revision, a set of
+ # 'pending' revisions, and a set of 'skip' revisions.
+ expected_revisions_tuple = 120, 140, {130}, {135}
- llvm_bisection._ValidateStartAndEndAgainstJSONStartAndEnd(
- start, end, json_start, json_end)
+ self.assertEqual(
+ llvm_bisection.GetRemainingRange(start, end, test_tryjobs),
+ expected_revisions_tuple)
- def testTryjobStatusIsMissing(self):
+ def testGetRemainingRangeFailedWithMissingStatus(self):
start = 100
end = 150
@@ -72,18 +79,14 @@ class LLVMBisectionTest(unittest.TestCase):
'link': 'https://some_tryjob_3_url.com'
}]
- # Verify the exception is raised when a tryjob does not have a value for
- # the 'status' key or the 'status' key is missing.
with self.assertRaises(ValueError) as err:
- llvm_bisection.GetStartAndEndRevision(start, end, test_tryjobs)
+ llvm_bisection.GetRemainingRange(start, end, test_tryjobs)
- expected_error_message = (
- '"status" is missing or has no value, please '
- 'go to %s and update it' % test_tryjobs[1]['link'])
+ error_message = ('"status" is missing or has no value, please '
+ 'go to %s and update it' % test_tryjobs[1]['link'])
+ self.assertEqual(str(err.exception), error_message)
- self.assertEqual(str(err.exception), expected_error_message)
-
- def testGoodRevisionGreaterThanBadRevision(self):
+ def testGetRemainingRangeFailedWithInvalidRange(self):
start = 100
end = 150
@@ -101,206 +104,68 @@ class LLVMBisectionTest(unittest.TestCase):
'link': 'https://some_tryjob_3_url.com'
}]
- # Verify the exception is raised when the new 'start' revision is greater
- # than the new 'bad' revision for bisection (i.e. bisection is broken).
with self.assertRaises(AssertionError) as err:
- llvm_bisection.GetStartAndEndRevision(start, end, test_tryjobs)
+ llvm_bisection.GetRemainingRange(start, end, test_tryjobs)
- expected_error_message = (
- 'Bisection is broken because %d (good) is >= '
- '%d (bad)' % (test_tryjobs[2]['rev'], test_tryjobs[0]['rev']))
+ expected_error_message = ('Bisection is broken because %d (good) is >= '
+ '%d (bad)' %
+ (test_tryjobs[2]['rev'], test_tryjobs[0]['rev']))
self.assertEqual(str(err.exception), expected_error_message)
- def testSuccessfullyGetNewStartAndNewEndRevision(self):
- start = 100
- end = 150
-
- test_tryjobs = [{
- 'rev': 110,
- 'status': 'good',
- 'link': 'https://some_tryjob_1_url.com'
- }, {
- 'rev': 120,
- 'status': 'good',
- 'link': 'https://some_tryjob_2_url.com'
- }, {
- 'rev': 130,
- 'status': 'pending',
- 'link': 'https://some_tryjob_3_url.com'
- }, {
- 'rev': 135,
- 'status': 'skip',
- 'link': 'https://some_tryjob_4_url.com'
- }, {
- 'rev': 140,
- 'status': 'bad',
- 'link': 'https://some_tryjob_5_url.com'
- }]
-
- # Tuple consists of the new good revision, the new bad revision, a set of
- # 'pending' revisions, and a set of 'skip' revisions.
- expected_revisions_tuple = 120, 140, {130}, {135}
-
- self.assertTupleEqual(
- llvm_bisection.GetStartAndEndRevision(start, end, test_tryjobs),
- expected_revisions_tuple)
-
@mock.patch.object(get_llvm_hash, 'GetGitHashFrom')
- def testNoRevisionsBetweenStartAndEnd(self, mock_get_git_hash):
- start = 100
- end = 110
-
- test_pending_revisions = {107}
- test_skip_revisions = {101, 102, 103, 104, 108, 109}
-
- # Simulate behavior of `GetGitHashFrom()` when the revision does not
- # exist in the LLVM source tree.
- def MockGetGitHashForRevisionRaiseException(_src_path, _revision):
- raise ValueError('Revision does not exist')
-
- mock_get_git_hash.side_effect = MockGetGitHashForRevisionRaiseException
-
- parallel = 3
-
- abs_path_to_src = '/abs/path/to/src'
-
- self.assertListEqual(
- llvm_bisection.GetRevisionsBetweenBisection(
- start, end, parallel, abs_path_to_src, test_pending_revisions,
- test_skip_revisions), [])
-
- # Assume llvm_bisection module has imported GetGitHashFrom
- @mock.patch.object(get_llvm_hash, 'GetGitHashFrom')
- def testSuccessfullyRetrievedRevisionsBetweenStartAndEnd(
- self, mock_get_git_hash):
-
- start = 100
- end = 110
-
- test_pending_revisions = set()
- test_skip_revisions = {101, 102, 103, 104, 106, 108, 109}
-
+ def testGetCommitsBetweenPassed(self, mock_get_git_hash):
+ start = git_llvm_rev.base_llvm_revision
+ end = start + 10
+ test_pending_revisions = {start + 7}
+ test_skip_revisions = {
+ start + 1, start + 2, start + 4, start + 8, start + 9
+ }
parallel = 3
-
abs_path_to_src = '/abs/path/to/src'
- # Valid revision that exist in the LLVM source tree between 'start' and
- # 'end' and were not in the 'pending' set or 'skip' set.
- expected_revisions_between_start_and_end = [105, 107]
-
- self.assertListEqual(
- llvm_bisection.GetRevisionsBetweenBisection(
- start, end, parallel, abs_path_to_src, test_pending_revisions,
- test_skip_revisions), expected_revisions_between_start_and_end)
-
- self.assertEqual(mock_get_git_hash.call_count, 2)
+ revs = ['a123testhash3', 'a123testhash5']
+ mock_get_git_hash.side_effect = revs
- # Simulate behavior of `GetGitHashFrom()` when successfully retrieved
- # a list git hashes for each revision in the revisions list.
- # Assume llvm_bisection module has imported GetGitHashFrom
- @mock.patch.object(get_llvm_hash, 'GetGitHashFrom')
- # Simulate behavior of `GetRevisionsBetweenBisection()` when successfully
- # retrieved a list of valid revisions between 'start' and 'end'.
- @mock.patch.object(llvm_bisection, 'GetRevisionsBetweenBisection')
- # Simulate behavior of `CreatTempLLVMRepo()` when successfully created a
- # worktree when a source path was not provided.
- @mock.patch.object(get_llvm_hash, 'CreateTempLLVMRepo')
- def testSuccessfullyGetRevisionsListAndHashList(
- self, mock_create_temp_llvm_repo, mock_get_revisions_between_bisection,
- mock_get_git_hash):
-
- expected_revisions_and_hash_tuple = ([102, 105, 108], [
- 'a123testhash1', 'a123testhash2', 'a123testhash3'
- ])
-
- @test_helpers.CallCountsToMockFunctions
- def MockGetGitHashForRevision(call_count, _src_path, _rev):
- # Simulate retrieving the git hash for the revision.
- if call_count < 3:
- return expected_revisions_and_hash_tuple[1][call_count]
-
- assert False, 'Called `GetGitHashFrom()` more than expected.'
-
- temp_worktree = '/abs/path/to/tmpDir'
-
- mock_create_temp_llvm_repo.return_value.__enter__.return_value.name = \
- temp_worktree
-
- # Simulate the valid revisions list.
- mock_get_revisions_between_bisection.return_value = \
- expected_revisions_and_hash_tuple[0]
-
- # Simulate behavior of `GetGitHashFrom()` by using the testing
- # function.
- mock_get_git_hash.side_effect = MockGetGitHashForRevision
+ git_hashes = [
+ git_llvm_rev.base_llvm_revision + 3, git_llvm_rev.base_llvm_revision + 5
+ ]
- start = 100
- end = 110
- parallel = 3
- src_path = None
- pending_revisions = {103, 104}
- skip_revisions = {101, 106, 107, 109}
-
- self.assertTupleEqual(
- llvm_bisection.GetRevisionsListAndHashList(
- start, end, parallel, src_path, pending_revisions, skip_revisions),
- expected_revisions_and_hash_tuple)
-
- mock_get_revisions_between_bisection.assert_called_once()
-
- self.assertEqual(mock_get_git_hash.call_count, 3)
+ self.assertEqual(
+ llvm_bisection.GetCommitsBetween(start, end, parallel, abs_path_to_src,
+ test_pending_revisions,
+ test_skip_revisions),
+ (git_hashes, revs))
- def testSuccessfullyDieWithNoRevisionsError(self):
+ def testLoadStatusFilePassedWithExistingFile(self):
start = 100
- end = 110
-
- pending_revisions = {105, 108}
- skip_revisions = {101, 102, 103, 104, 106, 107, 109}
-
- expected_no_revisions_message = ('No revisions between start %d and end '
- '%d to create tryjobs' % (start, end))
-
- expected_no_revisions_message += '\nThe following tryjobs are pending:\n' \
- + '\n'.join(str(rev) for rev in pending_revisions)
-
- expected_no_revisions_message += '\nThe following tryjobs were skipped:\n' \
- + '\n'.join(str(rev) for rev in skip_revisions)
-
- # Verify that an exception is raised when there are no revisions to launch
- # tryjobs for between 'start' and 'end' and some tryjobs are 'pending'.
- with self.assertRaises(ValueError) as err:
- llvm_bisection.DieWithNoRevisionsError(start, end, skip_revisions,
- pending_revisions)
+ end = 150
- self.assertEqual(str(err.exception), expected_no_revisions_message)
+ test_bisect_state = {'start': start, 'end': end, 'jobs': []}
- # Simulate behavior of `FindTryjobIndex()` when the index of the tryjob was
- # found.
- @mock.patch.object(update_tryjob_status, 'FindTryjobIndex', return_value=0)
- def testTryjobExistsInRevisionsToLaunch(self, mock_find_tryjob_index):
- test_existing_jobs = [{'rev': 102, 'status': 'good'}]
+ # Simulate that the status file exists.
+ with test_helpers.CreateTemporaryJsonFile() as temp_json_file:
+ with open(temp_json_file, 'w') as f:
+ test_helpers.WritePrettyJsonFile(test_bisect_state, f)
- revision_to_launch = [102]
+ self.assertEqual(
+ llvm_bisection.LoadStatusFile(temp_json_file, start, end),
+ test_bisect_state)
- expected_revision_that_exists = 102
+ def testLoadStatusFilePassedWithoutExistingFile(self):
+ start = 200
+ end = 250
- with self.assertRaises(ValueError) as err:
- llvm_bisection.CheckForExistingTryjobsInRevisionsToLaunch(
- revision_to_launch, test_existing_jobs)
+ expected_bisect_state = {'start': start, 'end': end, 'jobs': []}
- expected_found_tryjob_index_error_message = (
- 'Revision %d exists already '
- 'in "jobs"' % expected_revision_that_exists)
+ last_tested = '/abs/path/to/file_that_does_not_exist.json'
self.assertEqual(
- str(err.exception), expected_found_tryjob_index_error_message)
-
- mock_find_tryjob_index.assert_called_once()
+ llvm_bisection.LoadStatusFile(last_tested, start, end),
+ expected_bisect_state)
@mock.patch.object(modify_a_tryjob, 'AddTryjob')
- def testSuccessfullyUpdatedStatusFileWhenExceptionIsRaised(
- self, mock_add_tryjob):
+ def testBisectPassed(self, mock_add_tryjob):
git_hash_list = ['a123testhash1', 'a123testhash2', 'a123testhash3']
revisions_list = [102, 104, 106]
@@ -343,11 +208,11 @@ class LLVMBisectionTest(unittest.TestCase):
# Verify that the status file is updated when an exception happened when
# attempting to launch a revision (i.e. progress is not lost).
with self.assertRaises(ValueError) as err:
- llvm_bisection.UpdateBisection(
- revisions_list, git_hash_list, bisection_contents, temp_json_file,
- packages, args_output.chroot_path, patch_file,
- args_output.extra_change_lists, args_output.options,
- args_output.builders, args_output.verbose)
+ llvm_bisection.Bisect(revisions_list, git_hash_list, bisection_contents,
+ temp_json_file, packages, args_output.chroot_path,
+ patch_file, args_output.extra_change_lists,
+ args_output.options, args_output.builders,
+ args_output.verbose)
expected_bisection_contents = {
'start':
@@ -368,121 +233,143 @@ class LLVMBisectionTest(unittest.TestCase):
with open(temp_json_file) as f:
json_contents = json.load(f)
- self.assertDictEqual(json_contents, expected_bisection_contents)
+ self.assertEqual(json_contents, expected_bisection_contents)
self.assertEqual(str(err.exception), 'Unable to launch tryjob')
self.assertEqual(mock_add_tryjob.call_count, 3)
- # Simulate behavior of `GetGitHashFrom()` when successfully retrieved
- # the git hash of the bad revision. Assume llvm_bisection has imported
- # GetGitHashFrom
+ @mock.patch.object(subprocess, 'check_output', return_value=None)
@mock.patch.object(
- get_llvm_hash, 'GetGitHashFrom', return_value='a123testhash4')
- def testCompletedBisectionWhenProvidedSrcPath(self, mock_get_git_hash):
- last_tested = '/some/last/tested_file.json'
+ get_llvm_hash.LLVMHash, 'GetLLVMHash', return_value='a123testhash4')
+ @mock.patch.object(llvm_bisection, 'GetCommitsBetween')
+ @mock.patch.object(llvm_bisection, 'GetRemainingRange')
+ @mock.patch.object(llvm_bisection, 'LoadStatusFile')
+ @mock.patch.object(chroot, 'VerifyOutsideChroot', return_value=True)
+ def testMainPassed(self, mock_outside_chroot, mock_load_status_file,
+ mock_get_range, mock_get_revision_and_hash_list,
+ _mock_get_bad_llvm_hash, mock_abandon_cl):
- src_path = '/abs/path/to/src/path'
+ start = 500
+ end = 502
+ cl = 1
- # The bad revision.
- end = 150
+ bisect_state = {
+ 'start': start,
+ 'end': end,
+ 'jobs': [{
+ 'rev': 501,
+ 'status': 'bad',
+ 'cl': cl
+ }]
+ }
- llvm_bisection._NoteCompletedBisection(last_tested, src_path, end)
+ skip_revisions = {501}
+ pending_revisions = {}
- mock_get_git_hash.assert_called_once()
+ mock_load_status_file.return_value = bisect_state
- # Simulate behavior of `GetLLVMHash()` when successfully retrieved
- # the git hash of the bad revision.
- @mock.patch.object(
- get_llvm_hash.LLVMHash, 'GetLLVMHash', return_value='a123testhash5')
- def testCompletedBisectionWhenNotProvidedSrcPath(self, mock_get_git_hash):
- last_tested = '/some/last/tested_file.json'
+ mock_get_range.return_value = (start, end, pending_revisions,
+ skip_revisions)
- src_path = None
+ mock_get_revision_and_hash_list.return_value = [], []
- # The bad revision.
- end = 200
+ args_output = test_helpers.ArgsOutputTest()
+ args_output.start_rev = start
+ args_output.end_rev = end
+ args_output.parallel = 3
+ args_output.src_path = None
+ args_output.chroot_path = 'somepath'
+ args_output.cleanup = True
- llvm_bisection._NoteCompletedBisection(last_tested, src_path, end)
+ self.assertEqual(
+ llvm_bisection.main(args_output),
+ llvm_bisection.BisectionExitStatus.BISECTION_COMPLETE.value)
- mock_get_git_hash.assert_called_once()
+ mock_outside_chroot.assert_called_once()
- def testSuccessfullyLoadedStatusFile(self):
- start = 100
- end = 150
+ mock_load_status_file.assert_called_once()
- test_bisect_contents = {'start': start, 'end': end, 'jobs': []}
+ mock_get_range.assert_called_once()
- # Simulate that the status file exists.
- with test_helpers.CreateTemporaryJsonFile() as temp_json_file:
- with open(temp_json_file, 'w') as f:
- test_helpers.WritePrettyJsonFile(test_bisect_contents, f)
+ mock_get_revision_and_hash_list.assert_called_once()
- self.assertDictEqual(
- llvm_bisection.LoadStatusFile(temp_json_file, start, end),
- test_bisect_contents)
+ mock_abandon_cl.assert_called_once()
+ self.assertEqual(
+ mock_abandon_cl.call_args,
+ mock.call(
+ [
+ os.path.join(args_output.chroot_path, 'chromite/bin/gerrit'),
+ 'abandon',
+ cl,
+ ],
+ stderr=subprocess.STDOUT,
+ encoding='utf-8',
+ ))
- def testLoadedStatusFileThatDoesNotExist(self):
- start = 200
- end = 250
+ @mock.patch.object(llvm_bisection, 'LoadStatusFile')
+ @mock.patch.object(chroot, 'VerifyOutsideChroot', return_value=True)
+ def testMainFailedWithInvalidRange(self, mock_outside_chroot,
+ mock_load_status_file):
- expected_bisect_contents = {'start': start, 'end': end, 'jobs': []}
+ start = 500
+ end = 502
- last_tested = '/abs/path/to/file_that_does_not_exist.json'
+ bisect_state = {
+ 'start': start - 1,
+ 'end': end,
+ }
- self.assertDictEqual(
- llvm_bisection.LoadStatusFile(last_tested, start, end),
- expected_bisect_contents)
-
- # Simulate behavior of `_NoteCompletedBisection()` when there are no more
- # tryjobs to launch between start and end, so bisection is complete.
- @mock.patch.object(llvm_bisection, '_NoteCompletedBisection')
- @mock.patch.object(llvm_bisection, 'GetRevisionsListAndHashList')
- @mock.patch.object(llvm_bisection, 'GetStartAndEndRevision')
- # Simulate behavior of `_ValidateStartAndEndAgainstJSONStartAndEnd()` when
- # both start and end revisions match.
- @mock.patch.object(llvm_bisection,
- '_ValidateStartAndEndAgainstJSONStartAndEnd')
+ mock_load_status_file.return_value = bisect_state
+
+ args_output = test_helpers.ArgsOutputTest()
+ args_output.start_rev = start
+ args_output.end_rev = end
+ args_output.parallel = 3
+ args_output.src_path = None
+
+ with self.assertRaises(ValueError) as err:
+ llvm_bisection.main(args_output)
+
+ error_message = (f'The start {start} or the end {end} version provided is '
+ f'different than "start" {bisect_state["start"]} or "end" '
+ f'{bisect_state["end"]} in the .JSON file')
+
+ self.assertEqual(str(err.exception), error_message)
+
+ mock_outside_chroot.assert_called_once()
+
+ mock_load_status_file.assert_called_once()
+
+ @mock.patch.object(llvm_bisection, 'GetCommitsBetween')
+ @mock.patch.object(llvm_bisection, 'GetRemainingRange')
@mock.patch.object(llvm_bisection, 'LoadStatusFile')
- # Simulate behavior of `VerifyOutsideChroot()` when successfully invoked the
- # script outside of the chroot.
@mock.patch.object(chroot, 'VerifyOutsideChroot', return_value=True)
- def testSuccessfullyBisectedLLVM(
- self, mock_outside_chroot, mock_load_status_file,
- mock_validate_start_and_end, mock_get_start_and_end_revision,
- mock_get_revision_and_hash_list, mock_note_completed_bisection):
+ def testMainFailedWithPendingBuilds(self, mock_outside_chroot,
+ mock_load_status_file, mock_get_range,
+ mock_get_revision_and_hash_list):
start = 500
end = 502
+ rev = 501
- bisect_contents = {
+ bisect_state = {
'start': start,
'end': end,
'jobs': [{
- 'rev': 501,
- 'status': 'skip'
+ 'rev': rev,
+ 'status': 'pending'
}]
}
- skip_revisions = {501}
- pending_revisions = {}
+ skip_revisions = {}
+ pending_revisions = {rev}
+
+ mock_load_status_file.return_value = bisect_state
+
+ mock_get_range.return_value = (start, end, pending_revisions,
+ skip_revisions)
- # Simulate behavior of `LoadStatusFile()` when successfully loaded the
- # status file.
- mock_load_status_file.return_value = bisect_contents
-
- # Simulate behavior of `GetStartAndEndRevision()` when successfully found
- # the new start and end revision of the bisection.
- #
- # Returns new start revision, new end revision, a set of pending revisions,
- # and a set of skip revisions.
- mock_get_start_and_end_revision.return_value = (start, end,
- pending_revisions,
- skip_revisions)
-
- # Simulate behavior of `GetRevisionsListAndHashList()` when successfully
- # retrieved valid revisions (along with their git hashes) between start and
- # end (in this case, none).
mock_get_revision_and_hash_list.return_value = [], []
args_output = test_helpers.ArgsOutputTest()
@@ -491,110 +378,131 @@ class LLVMBisectionTest(unittest.TestCase):
args_output.parallel = 3
args_output.src_path = None
- self.assertEqual(
- llvm_bisection.main(args_output),
- llvm_bisection.BisectionExitStatus.BISECTION_COMPLETE.value)
+ with self.assertRaises(ValueError) as err:
+ llvm_bisection.main(args_output)
+
+ error_message = (f'No revisions between start {start} and end {end} to '
+ 'create tryjobs\nThe following tryjobs are pending:\n'
+ f'{rev}\n')
+
+ self.assertEqual(str(err.exception), error_message)
mock_outside_chroot.assert_called_once()
mock_load_status_file.assert_called_once()
- mock_validate_start_and_end.assert_called_once()
-
- mock_get_start_and_end_revision.assert_called_once()
+ mock_get_range.assert_called_once()
mock_get_revision_and_hash_list.assert_called_once()
- mock_note_completed_bisection.assert_called_once()
-
- @mock.patch.object(llvm_bisection, 'DieWithNoRevisionsError')
- # Simulate behavior of `_NoteCompletedBisection()` when there are no more
- # tryjobs to launch between start and end, so bisection is complete.
- @mock.patch.object(llvm_bisection, 'GetRevisionsListAndHashList')
- @mock.patch.object(llvm_bisection, 'GetStartAndEndRevision')
- # Simulate behavior of `_ValidateStartAndEndAgainstJSONStartAndEnd()` when
- # both start and end revisions match.
- @mock.patch.object(llvm_bisection,
- '_ValidateStartAndEndAgainstJSONStartAndEnd')
+ @mock.patch.object(llvm_bisection, 'GetCommitsBetween')
+ @mock.patch.object(llvm_bisection, 'GetRemainingRange')
@mock.patch.object(llvm_bisection, 'LoadStatusFile')
- # Simulate behavior of `VerifyOutsideChroot()` when successfully invoked the
- # script outside of the chroot.
@mock.patch.object(chroot, 'VerifyOutsideChroot', return_value=True)
- def testNoMoreTryjobsToLaunch(
- self, mock_outside_chroot, mock_load_status_file,
- mock_validate_start_and_end, mock_get_start_and_end_revision,
- mock_get_revision_and_hash_list, mock_die_with_no_revisions_error):
+ def testMainFailedWithDuplicateBuilds(self, mock_outside_chroot,
+ mock_load_status_file, mock_get_range,
+ mock_get_revision_and_hash_list):
start = 500
end = 502
+ rev = 501
+ git_hash = 'a123testhash1'
- bisect_contents = {
+ bisect_state = {
'start': start,
'end': end,
'jobs': [{
- 'rev': 501,
+ 'rev': rev,
'status': 'pending'
}]
}
skip_revisions = {}
- pending_revisions = {501}
-
- no_revisions_error_message = ('No more tryjobs to launch between %d and '
- '%d' % (start, end))
-
- def MockNoRevisionsErrorException(_start, _end, _skip, _pending):
- raise ValueError(no_revisions_error_message)
-
- # Simulate behavior of `LoadStatusFile()` when successfully loaded the
- # status file.
- mock_load_status_file.return_value = bisect_contents
-
- # Simulate behavior of `GetStartAndEndRevision()` when successfully found
- # the new start and end revision of the bisection.
- #
- # Returns new start revision, new end revision, a set of pending revisions,
- # and a set of skip revisions.
- mock_get_start_and_end_revision.return_value = (start, end,
- pending_revisions,
- skip_revisions)
-
- # Simulate behavior of `GetRevisionsListAndHashList()` when successfully
- # retrieved valid revisions (along with their git hashes) between start and
- # end (in this case, none).
- mock_get_revision_and_hash_list.return_value = [], []
+ pending_revisions = {rev}
+
+ mock_load_status_file.return_value = bisect_state
- # Use the test function to simulate `DieWithNoRevisionsWithError()`
- # behavior.
- mock_die_with_no_revisions_error.side_effect = MockNoRevisionsErrorException
+ mock_get_range.return_value = (start, end, pending_revisions,
+ skip_revisions)
+
+ mock_get_revision_and_hash_list.return_value = [rev], [git_hash]
- # Simulate behavior of arguments passed into the command line.
args_output = test_helpers.ArgsOutputTest()
args_output.start_rev = start
args_output.end_rev = end
args_output.parallel = 3
args_output.src_path = None
- # Verify the exception is raised when there are no more tryjobs to launch
- # between start and end when there are tryjobs that are 'pending', so
- # the actual bad revision can change when those tryjobs's 'status' are
- # updated.
with self.assertRaises(ValueError) as err:
llvm_bisection.main(args_output)
- self.assertEqual(str(err.exception), no_revisions_error_message)
+ error_message = ('Revision %d exists already in "jobs"' % rev)
+ self.assertEqual(str(err.exception), error_message)
mock_outside_chroot.assert_called_once()
mock_load_status_file.assert_called_once()
- mock_validate_start_and_end.assert_called_once()
-
- mock_get_start_and_end_revision.assert_called_once()
+ mock_get_range.assert_called_once()
mock_get_revision_and_hash_list.assert_called_once()
- mock_die_with_no_revisions_error.assert_called_once()
+ @mock.patch.object(subprocess, 'check_output', return_value=None)
+ @mock.patch.object(
+ get_llvm_hash.LLVMHash, 'GetLLVMHash', return_value='a123testhash4')
+ @mock.patch.object(llvm_bisection, 'GetCommitsBetween')
+ @mock.patch.object(llvm_bisection, 'GetRemainingRange')
+ @mock.patch.object(llvm_bisection, 'LoadStatusFile')
+ @mock.patch.object(chroot, 'VerifyOutsideChroot', return_value=True)
+ def testMainFailedToAbandonCL(self, mock_outside_chroot,
+ mock_load_status_file, mock_get_range,
+ mock_get_revision_and_hash_list,
+ _mock_get_bad_llvm_hash, mock_abandon_cl):
+
+ start = 500
+ end = 502
+
+ bisect_state = {
+ 'start': start,
+ 'end': end,
+ 'jobs': [{
+ 'rev': 501,
+ 'status': 'bad',
+ 'cl': 0
+ }]
+ }
+
+ skip_revisions = {501}
+    pending_revisions = set()
+
+ mock_load_status_file.return_value = bisect_state
+
+ mock_get_range.return_value = (start, end, pending_revisions,
+ skip_revisions)
+
+ mock_get_revision_and_hash_list.return_value = ([], [])
+
+ error_message = 'Error message.'
+ mock_abandon_cl.side_effect = subprocess.CalledProcessError(
+ returncode=1, cmd=[], output=error_message)
+
+ args_output = test_helpers.ArgsOutputTest()
+ args_output.start_rev = start
+ args_output.end_rev = end
+ args_output.parallel = 3
+ args_output.src_path = None
+ args_output.cleanup = True
+
+ with self.assertRaises(subprocess.CalledProcessError) as err:
+ llvm_bisection.main(args_output)
+
+ self.assertEqual(err.exception.output, error_message)
+
+ mock_outside_chroot.assert_called_once()
+
+ mock_load_status_file.assert_called_once()
+
+ mock_get_range.assert_called_once()
if __name__ == '__main__':
diff --git a/llvm_tools/llvm_project.py b/llvm_tools/llvm_project.py
index c171370c..7937729f 100644
--- a/llvm_tools/llvm_project.py
+++ b/llvm_tools/llvm_project.py
@@ -13,6 +13,7 @@ import subprocess
import sys
import get_llvm_hash
+import git_llvm_rev
def get_location() -> str:
@@ -33,9 +34,13 @@ def ensure_up_to_date():
assert checkout == actual_checkout, '%s != %s' % (actual_checkout, checkout)
commit_timestamp = subprocess.check_output(
- ['git', 'log', '-n1', '--format=%ct', 'origin/master'],
+ [
+ 'git', 'log', '-n1', '--format=%ct',
+ 'origin/' + git_llvm_rev.MAIN_BRANCH
+ ],
cwd=checkout,
- encoding='utf-8')
+ encoding='utf-8',
+ )
commit_time = datetime.datetime.fromtimestamp(int(commit_timestamp.strip()))
now = datetime.datetime.now()
@@ -52,7 +57,7 @@ def ensure_up_to_date():
(time_since_last_commit.days, checkout),
file=sys.stderr)
- result = subprocess.run(['git', 'fetch', 'origin'], cwd=checkout)
+ result = subprocess.run(['git', 'fetch', 'origin'], check=False, cwd=checkout)
if result.returncode:
print(
'Sync failed somehow; hoping that things are fresh enough, then...',
diff --git a/llvm_tools/nightly_revert_checker.py b/llvm_tools/nightly_revert_checker.py
index 71b5574b..3a23890a 100755
--- a/llvm_tools/nightly_revert_checker.py
+++ b/llvm_tools/nightly_revert_checker.py
@@ -27,6 +27,7 @@ import typing as t
import cros_utils.email_sender as email_sender
import cros_utils.tiny_render as tiny_render
import get_llvm_hash
+import git_llvm_rev
import revert_checker
State = t.Any
@@ -44,12 +45,12 @@ def _find_interesting_android_shas(
encoding='utf-8',
).strip()
- master_legacy = get_llvm_merge_base('aosp/master-legacy')
+ main_legacy = get_llvm_merge_base('aosp/master-legacy')
testing_upstream = get_llvm_merge_base('aosp/testing-upstream')
- result = [('master-legacy', master_legacy)]
+ result = [('main-legacy', main_legacy)]
# If these are the same SHA, there's no point in tracking both.
- if master_legacy != testing_upstream:
+ if main_legacy != testing_upstream:
result.append(('testing-upstream', testing_upstream))
return result
@@ -270,7 +271,7 @@ def main(argv: t.List[str]) -> None:
for friendly_name, sha in interesting_shas:
logging.info('Finding reverts across %s (%s)', friendly_name, sha)
all_reverts = revert_checker.find_reverts(
- llvm_dir, sha, root='origin/master')
+ llvm_dir, sha, root='origin/' + git_llvm_rev.MAIN_BRANCH)
logging.info('Detected the following revert(s) across %s:\n%s',
friendly_name, pprint.pformat(all_reverts))
diff --git a/llvm_tools/upload_lexan_crashes_to_forcey.py b/llvm_tools/upload_lexan_crashes_to_forcey.py
new file mode 100755
index 00000000..b93f51a7
--- /dev/null
+++ b/llvm_tools/upload_lexan_crashes_to_forcey.py
@@ -0,0 +1,258 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+# Copyright 2020 The Chromium OS Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Fetches and submits the latest test-cases from Lexan's crash bucket."""
+
+# pylint: disable=cros-logging-import
+
+import argparse
+import contextlib
+import datetime
+import json
+import logging
+import os
+import shutil
+import subprocess
+import sys
+import tempfile
+from typing import Generator, Iterable, List, Tuple
+
+gsurl_base = 'gs://chrome-clang-crash-reports/v1'
+
+
+def gsutil_ls(loc: str) -> List[str]:
+ results = subprocess.run(['gsutil.py', 'ls', loc],
+ stdout=subprocess.PIPE,
+ check=True,
+ encoding='utf-8')
+  return [line.strip() for line in results.stdout.splitlines()]
+
+
+def gsurl_ls_last_numbers(url: str) -> List[int]:
+ return sorted(int(x.rstrip('/').split('/')[-1]) for x in gsutil_ls(url))
+
+
+def get_available_year_numbers() -> List[int]:
+ return gsurl_ls_last_numbers(gsurl_base)
+
+
+def get_available_month_numbers(year: int) -> List[int]:
+ return gsurl_ls_last_numbers(f'{gsurl_base}/{year}')
+
+
+def get_available_day_numbers(year: int, month: int) -> List[int]:
+ return gsurl_ls_last_numbers(f'{gsurl_base}/{year}/{month:02d}')
+
+
+def get_available_test_case_urls(year: int, month: int, day: int) -> List[str]:
+ return gsutil_ls(f'{gsurl_base}/{year}/{month:02d}/{day:02d}')
+
+
+def test_cases_on_or_after(date: datetime.date
+                           ) -> Generator[Tuple[datetime.date, List[str]], None, None]:
+ """Yields all test-cases submitted on or after the given date."""
+ for year in get_available_year_numbers():
+ if year < date.year:
+ continue
+
+ for month in get_available_month_numbers(year):
+ if year == date.year and month < date.month:
+ continue
+
+ for day in get_available_day_numbers(year, month):
+ when = datetime.date(year, month, day)
+ if when < date:
+ continue
+
+ yield when, get_available_test_case_urls(year, month, day)
+
+
+def to_ymd(date: datetime.date) -> str:
+ return date.strftime('%Y-%m-%d')
+
+
+def from_ymd(date_str: str) -> datetime.date:
+ return datetime.datetime.strptime(date_str, '%Y-%m-%d').date()
+
+
+def persist_state(seen_urls: Iterable[str], state_file: str,
+ current_date: datetime.date):
+ tmp_state_file = state_file + '.tmp'
+ with open(tmp_state_file, 'w', encoding='utf-8') as f:
+ json.dump(
+ {
+ 'already_seen': sorted(seen_urls),
+ 'most_recent_date': to_ymd(current_date),
+ },
+ f,
+ )
+ os.rename(tmp_state_file, state_file)
+
+
+@contextlib.contextmanager
+def temp_dir() -> Generator[str, None, None]:
+ loc = tempfile.mkdtemp('lexan-autosubmit')
+ try:
+ yield loc
+ finally:
+ shutil.rmtree(loc)
+
+
+def submit_test_case(gs_url: str, cr_tool: str) -> None:
+ logging.info('Submitting %s', gs_url)
+ suffix = os.path.splitext(gs_url)[1]
+ with temp_dir() as tempdir:
+ target_name = 'test_case' + suffix
+ target = os.path.join(tempdir, target_name)
+ subprocess.run(['gsutil.py', 'cp', gs_url, target], check=True)
+ subprocess.run(['tar', 'xaf', target_name], check=True, cwd=tempdir)
+ os.unlink(target)
+
+ # Sometimes (e.g., in
+ # gs://chrome-clang-crash-reports/v1/2020/03/27/
+ # chromium.clang-ToTiOS-12754-GTXToolKit-2bfcde.tgz)
+ # we'll get `.crash` files. Unclear why, but let's filter them out anyway.
+ repro_files = [x for x in os.listdir(tempdir) if not x.endswith('.crash')]
+ assert len(repro_files) == 2, repro_files
+ if repro_files[0].endswith('.sh'):
+ sh_file, src_file = repro_files
+ assert not src_file.endswith('.sh'), repro_files
+ else:
+ src_file, sh_file = repro_files
+ assert sh_file.endswith('.sh'), repro_files
+
+ subprocess.run(
+ [
+ cr_tool,
+ 'reduce',
+ '-stream=false',
+ '-wait=false',
+ '-note',
+ gs_url,
+ '-sh_file',
+ os.path.join(tempdir, sh_file),
+ '-src_file',
+ os.path.join(tempdir, src_file),
+ ],
+ check=True,
+ )
+
+
+def submit_new_test_cases(
+ last_seen_test_cases: Iterable[str],
+ earliest_date_to_check: datetime.date,
+ forcey: str,
+ state_file_path: str,
+) -> None:
+ """Submits new test-cases to forcey.
+
+ This will persist state after each test-case is submitted.
+
+ Args:
+ last_seen_test_cases: test-cases which have been submitted already, and
+ should be skipped if seen again.
+ earliest_date_to_check: the earliest date we should consider test-cases
+ from.
+ forcey: path to the forcey binary.
+ state_file_path: path to our state file.
+ """
+ # `all_test_cases_seen` is the union of all test-cases seen on this and prior
+ # invocations. It guarantees, in all cases we care about, that we won't
+ # submit the same test-case twice. `test_cases_seen_this_invocation` is
+ # persisted as "all of the test-cases we've seen on this and prior
+ # invocations" if we successfully submit _all_ test-cases.
+ #
+ # Since you can visualize the test-cases this script considers as a sliding
+ # window that only moves forward, if we saw a test-case on a prior iteration
+ # but no longer see it, we'll never see it again (since it fell out of our
+ # sliding window by being too old). Hence, keeping it around is
+ # pointless.
+ #
+ # We only persist this minimized set of test-cases if _everything_ succeeds,
+ # since if something fails below, there's a chance that we haven't revisited
+ # test-cases that we've already seen.
+ all_test_cases_seen = set(last_seen_test_cases)
+ test_cases_seen_this_invocation = []
+ most_recent_date = earliest_date_to_check
+ for date, candidates in test_cases_on_or_after(earliest_date_to_check):
+ most_recent_date = max(most_recent_date, date)
+
+ for url in candidates:
+ test_cases_seen_this_invocation.append(url)
+ if url in all_test_cases_seen:
+ continue
+
+ all_test_cases_seen.add(url)
+ submit_test_case(url, forcey)
+
+ # Persisting on each iteration of this loop isn't free, but it's the
+ # easiest way to not resubmit test-cases, and it's good to keep in mind
+ # that:
+ # - the state file will be small (<12KB, since it only keeps a few days
+ # worth of test-cases after the first run)
+ # - in addition to this, we're downloading+unzipping+reuploading multiple
+ # MB of test-case bytes.
+ #
+ # So comparatively, the overhead here probably isn't an issue.
+ persist_state(all_test_cases_seen, state_file_path, most_recent_date)
+
+ persist_state(test_cases_seen_this_invocation, state_file_path,
+ most_recent_date)
+
+
+def main(argv: List[str]):
+ logging.basicConfig(
+ format='>> %(asctime)s: %(levelname)s: %(filename)s:%(lineno)d: '
+ '%(message)s',
+ level=logging.INFO,
+ )
+
+ my_dir = os.path.dirname(os.path.abspath(__file__))
+
+ parser = argparse.ArgumentParser(description=__doc__)
+ parser.add_argument(
+ '--state_file', default=os.path.join(my_dir, 'lexan-state.json'))
+ parser.add_argument(
+ '--last_date',
+ help='The earliest date that we care about. All test cases from here '
+ 'on will be picked up. Format is YYYY-MM-DD.')
+ parser.add_argument(
+ '--4c', dest='forcey', required=True, help='Path to a 4c client binary')
+ opts = parser.parse_args(argv)
+
+ forcey = opts.forcey
+ state_file = opts.state_file
+ last_date_str = opts.last_date
+
+  os.makedirs(os.path.dirname(os.path.abspath(state_file)), 0o755, exist_ok=True)
+
+ if last_date_str is None:
+ with open(state_file, encoding='utf-8') as f:
+ data = json.load(f)
+ most_recent_date = from_ymd(data['most_recent_date'])
+ submit_new_test_cases(
+ last_seen_test_cases=data['already_seen'],
+ # Note that we always subtract one day from this to avoid a race:
+ # uploads may appear slightly out-of-order (or builders may lag, or
+ # ...), so the last test-case uploaded for 2020/01/01 might appear
+ # _after_ the first test-case for 2020/01/02. Assuming that builders
+ # won't lag behind for over a day, the easiest way to handle this is to
+ # always check the previous and current days.
+ earliest_date_to_check=most_recent_date - datetime.timedelta(days=1),
+ forcey=forcey,
+ state_file_path=state_file,
+ )
+ else:
+ submit_new_test_cases(
+ last_seen_test_cases=(),
+ earliest_date_to_check=from_ymd(last_date_str),
+ forcey=forcey,
+ state_file_path=state_file,
+ )
+
+
+if __name__ == '__main__':
+ sys.exit(main(sys.argv[1:]))
diff --git a/llvm_tools/upload_lexan_crashes_to_forcey_test.py b/llvm_tools/upload_lexan_crashes_to_forcey_test.py
new file mode 100755
index 00000000..3c9c0d4b
--- /dev/null
+++ b/llvm_tools/upload_lexan_crashes_to_forcey_test.py
@@ -0,0 +1,122 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+# Copyright 2020 The Chromium OS Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Tests for upload_lexan_crashes_to_forcey."""
+
+import datetime
+import unittest
+import unittest.mock
+
+import upload_lexan_crashes_to_forcey
+
+
+class Test(unittest.TestCase):
+ """Tests for upload_lexan_crashes_to_forcey."""
+
+ def test_date_parsing_functions(self):
+ self.assertEqual(
+ datetime.date(2020, 2, 1),
+ upload_lexan_crashes_to_forcey.from_ymd('2020-02-01'))
+
+ @unittest.mock.patch(
+ 'upload_lexan_crashes_to_forcey.test_cases_on_or_after',
+ return_value=(
+ (
+ datetime.date(2020, 1, 1),
+ ('gs://test-case-1', 'gs://test-case-1.1'),
+ ),
+ (datetime.date(2020, 1, 2), ('gs://test-case-2',)),
+ (datetime.date(2020, 1, 1), ('gs://test-case-3',)),
+ (datetime.date(2020, 1, 4), ('gs://test-case-4',)),
+ ))
+ @unittest.mock.patch('upload_lexan_crashes_to_forcey.submit_test_case')
+ @unittest.mock.patch('upload_lexan_crashes_to_forcey.persist_state')
+ def test_new_test_case_submission_functions(self, persist_state_mock,
+ submit_test_case_mock,
+ test_cases_on_or_after_mock):
+ forcey_path = '/path/to/4c'
+ real_state_file_path = '/path/to/state/file'
+ earliest_date = datetime.date(2020, 1, 1)
+
+ persist_state_calls = []
+
+ # Since the set this gets is mutated, we need to copy it somehow.
+ def persist_state_side_effect(test_cases_to_persist, state_file_path,
+ most_recent_date):
+ self.assertEqual(state_file_path, real_state_file_path)
+ persist_state_calls.append(
+ (sorted(test_cases_to_persist), most_recent_date))
+
+ persist_state_mock.side_effect = persist_state_side_effect
+
+ upload_lexan_crashes_to_forcey.submit_new_test_cases(
+ last_seen_test_cases=(
+ 'gs://test-case-0',
+ 'gs://test-case-1',
+ ),
+ earliest_date_to_check=earliest_date,
+ forcey=forcey_path,
+ state_file_path=real_state_file_path,
+ )
+
+ test_cases_on_or_after_mock.assert_called_once_with(earliest_date)
+ self.assertEqual(submit_test_case_mock.call_args_list, [
+ unittest.mock.call('gs://test-case-1.1', forcey_path),
+ unittest.mock.call('gs://test-case-2', forcey_path),
+ unittest.mock.call('gs://test-case-3', forcey_path),
+ unittest.mock.call('gs://test-case-4', forcey_path),
+ ])
+
+ self.assertEqual(persist_state_calls, [
+ (
+ ['gs://test-case-0', 'gs://test-case-1', 'gs://test-case-1.1'],
+ datetime.date(2020, 1, 1),
+ ),
+ (
+ [
+ 'gs://test-case-0',
+ 'gs://test-case-1',
+ 'gs://test-case-1.1',
+ 'gs://test-case-2',
+ ],
+ datetime.date(2020, 1, 2),
+ ),
+ (
+ [
+ 'gs://test-case-0',
+ 'gs://test-case-1',
+ 'gs://test-case-1.1',
+ 'gs://test-case-2',
+ 'gs://test-case-3',
+ ],
+ datetime.date(2020, 1, 2),
+ ),
+ (
+ [
+ 'gs://test-case-0',
+ 'gs://test-case-1',
+ 'gs://test-case-1.1',
+ 'gs://test-case-2',
+ 'gs://test-case-3',
+ 'gs://test-case-4',
+ ],
+ datetime.date(2020, 1, 4),
+ ),
+ (
+ [
+ 'gs://test-case-1',
+ 'gs://test-case-1.1',
+ 'gs://test-case-2',
+ 'gs://test-case-3',
+ 'gs://test-case-4',
+ ],
+ datetime.date(2020, 1, 4),
+ ),
+ ])
+
+
+if __name__ == '__main__':
+ unittest.main()